#include <stdint.h>

/* Internal 64-bit constant helpers.  Defined here instead of using the
   <stdint.h> UINT64_C/INT64_C macros — NOTE(review): presumably because
   C++ only provides those macros when __STDC_CONSTANT_MACROS is defined;
   confirm against the accompanying ChangeLog.  */
#define __AARCH64_UINT64_C(__C) ((uint64_t) __C)
#define __AARCH64_INT64_C(__C) ((int64_t) __C)
typedef __builtin_aarch64_simd_qi int8x8_t
__attribute__ ((__vector_size__ (8)));
typedef __builtin_aarch64_simd_hi int16x4_t
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vmovn_high_s16 (int8x8_t a, int16x8_t b)
{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.16b,%1.8h"
: "+w"(result)
: "w"(b)
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vmovn_high_s32 (int16x4_t a, int32x4_t b)
{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.8h,%1.4s"
: "+w"(result)
: "w"(b)
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vmovn_high_s64 (int32x2_t a, int64x2_t b)
{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.4s,%1.2d"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.16b,%1.8h"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.8h,%1.4s"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("xtn2 %0.4s,%1.2d"
: "+w"(result)
: "w"(b)
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vqmovn_high_s16 (int8x8_t a, int16x8_t b)
{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.16b, %1.8h"
: "+w"(result)
: "w"(b)
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vqmovn_high_s32 (int16x4_t a, int32x4_t b)
{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.8h, %1.4s"
: "+w"(result)
: "w"(b)
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqmovn_high_s64 (int32x2_t a, int64x2_t b)
{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtn2 %0.4s, %1.2d"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqmovn_high_u16 (uint8x8_t a, uint16x8_t b)
{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.16b, %1.8h"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqmovn_high_u32 (uint16x4_t a, uint32x4_t b)
{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.8h, %1.4s"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqmovn_high_u64 (uint32x2_t a, uint64x2_t b)
{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("uqxtn2 %0.4s, %1.2d"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vqmovun_high_s16 (uint8x8_t a, int16x8_t b)
{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.16b, %1.8h"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vqmovun_high_s32 (uint16x4_t a, int32x4_t b)
{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.8h, %1.4s"
: "+w"(result)
: "w"(b)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vqmovun_high_s64 (uint32x2_t a, int64x2_t b)
{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("sqxtun2 %0.4s, %1.2d"
: "+w"(result)
: "w"(b)
int16x8_t b_ = (b); \
int8x8_t a_ = (a); \
int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrn2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrn2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrn2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqrshrn2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqrshrn2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqrshrn2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrun2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrun2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqrshrun2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int16x8_t b_ = (b); \
int8x8_t a_ = (a); \
int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrn2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrn2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrn2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqshrn2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqshrn2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("uqshrn2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrun2 %0.16b, %1.8h, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrun2 %0.8h, %1.4s, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("sqshrun2 %0.4s, %1.2d, #%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int16x8_t b_ = (b); \
int8x8_t a_ = (a); \
int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("rshrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vrsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vrsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vrsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vrsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.16b, %1.8h, %2.8h"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vrsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.8h, %1.4s, %2.4s"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vrsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("rsubhn2 %0.4s, %1.2d, %2.2d"
: "+w"(result)
: "w"(b), "w"(c)
int16x8_t b_ = (b); \
int8x8_t a_ = (a); \
int8x16_t result = vcombine_s8 \
- (a_, vcreate_s8 (UINT64_C (0x0))); \
+ (a_, vcreate_s8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int32x4_t b_ = (b); \
int16x4_t a_ = (a); \
int16x8_t result = vcombine_s16 \
- (a_, vcreate_s16 (UINT64_C (0x0))); \
+ (a_, vcreate_s16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
int64x2_t b_ = (b); \
int32x2_t a_ = (a); \
int32x4_t result = vcombine_s32 \
- (a_, vcreate_s32 (UINT64_C (0x0))); \
+ (a_, vcreate_s32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint16x8_t b_ = (b); \
uint8x8_t a_ = (a); \
uint8x16_t result = vcombine_u8 \
- (a_, vcreate_u8 (UINT64_C (0x0))); \
+ (a_, vcreate_u8 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.16b,%1.8h,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint32x4_t b_ = (b); \
uint16x4_t a_ = (a); \
uint16x8_t result = vcombine_u16 \
- (a_, vcreate_u16 (UINT64_C (0x0))); \
+ (a_, vcreate_u16 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.8h,%1.4s,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
uint64x2_t b_ = (b); \
uint32x2_t a_ = (a); \
uint32x4_t result = vcombine_u32 \
- (a_, vcreate_u32 (UINT64_C (0x0))); \
+ (a_, vcreate_u32 \
+ (__AARCH64_UINT64_C (0x0))); \
__asm__ ("shrn2 %0.4s,%1.2d,#%2" \
: "+w"(result) \
: "w"(b_), "i"(c) \
__extension__ static __inline int8x16_t __attribute__ ((__always_inline__))
vsubhn_high_s16 (int8x8_t a, int16x8_t b, int16x8_t c)
{
- int8x16_t result = vcombine_s8 (a, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t result = vcombine_s8 (a, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline int16x8_t __attribute__ ((__always_inline__))
vsubhn_high_s32 (int16x4_t a, int32x4_t b, int32x4_t c)
{
- int16x8_t result = vcombine_s16 (a, vcreate_s16 (UINT64_C (0x0)));
+ int16x8_t result = vcombine_s16 (a, vcreate_s16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vsubhn_high_s64 (int32x2_t a, int64x2_t b, int64x2_t c)
{
- int32x4_t result = vcombine_s32 (a, vcreate_s32 (UINT64_C (0x0)));
+ int32x4_t result = vcombine_s32 (a, vcreate_s32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint8x16_t __attribute__ ((__always_inline__))
vsubhn_high_u16 (uint8x8_t a, uint16x8_t b, uint16x8_t c)
{
- uint8x16_t result = vcombine_u8 (a, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t result = vcombine_u8 (a, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.16b, %1.8h, %2.8h"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint16x8_t __attribute__ ((__always_inline__))
vsubhn_high_u32 (uint16x4_t a, uint32x4_t b, uint32x4_t c)
{
- uint16x8_t result = vcombine_u16 (a, vcreate_u16 (UINT64_C (0x0)));
+ uint16x8_t result = vcombine_u16 (a, vcreate_u16 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.8h, %1.4s, %2.4s"
: "+w"(result)
: "w"(b), "w"(c)
__extension__ static __inline uint32x4_t __attribute__ ((__always_inline__))
vsubhn_high_u64 (uint32x2_t a, uint64x2_t b, uint64x2_t c)
{
- uint32x4_t result = vcombine_u32 (a, vcreate_u32 (UINT64_C (0x0)));
+ uint32x4_t result = vcombine_u32 (a, vcreate_u32 (__AARCH64_UINT64_C (0x0)));
__asm__ ("subhn2 %0.4s, %1.2d, %2.2d"
: "+w"(result)
: "w"(b), "w"(c)
vtbl1_s8 (int8x8_t tab, int8x8_t idx)
{
int8x8_t result;
- int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
: "=w"(result)
: "w"(temp), "w"(idx)
vtbl1_u8 (uint8x8_t tab, uint8x8_t idx)
{
uint8x8_t result;
- uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
: "=w"(result)
: "w"(temp), "w"(idx)
vtbl1_p8 (poly8x8_t tab, uint8x8_t idx)
{
poly8x8_t result;
- poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+ poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("tbl %0.8b, {%1.16b}, %2.8b"
: "=w"(result)
: "w"(temp), "w"(idx)
int8x8_t result;
int8x16x2_t temp;
temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
"tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
: "=w"(result)
uint8x8_t result;
uint8x16x2_t temp;
temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
"tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
: "=w"(result)
poly8x8_t result;
poly8x16x2_t temp;
temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b }, %1\n\t"
"tbl %0.8b, {v16.16b - v17.16b}, %2.8b\n\t"
: "=w"(result)
{
int8x8_t result;
int8x8_t tmp1;
- int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (UINT64_C (0x0)));
+ int8x16_t temp = vcombine_s8 (tab, vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("movi %0.8b, 8\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
"tbl %1.8b, {%2.16b}, %3.8b\n\t"
{
uint8x8_t result;
uint8x8_t tmp1;
- uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (UINT64_C (0x0)));
+ uint8x16_t temp = vcombine_u8 (tab, vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("movi %0.8b, 8\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
"tbl %1.8b, {%2.16b}, %3.8b\n\t"
{
poly8x8_t result;
poly8x8_t tmp1;
- poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (UINT64_C (0x0)));
+ poly8x16_t temp = vcombine_p8 (tab, vcreate_p8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("movi %0.8b, 8\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
"tbl %1.8b, {%2.16b}, %3.8b\n\t"
int8x8_t tmp1;
int8x16x2_t temp;
temp.val[0] = vcombine_s8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_s8 (tab.val[2], vcreate_s8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
"movi %0.8b, 24\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
uint8x8_t tmp1;
uint8x16x2_t temp;
temp.val[0] = vcombine_u8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_u8 (tab.val[2], vcreate_u8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
"movi %0.8b, 24\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
poly8x8_t tmp1;
poly8x16x2_t temp;
temp.val[0] = vcombine_p8 (tab.val[0], tab.val[1]);
- temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (UINT64_C (0x0)));
+ temp.val[1] = vcombine_p8 (tab.val[2], vcreate_p8 (__AARCH64_UINT64_C (0x0)));
__asm__ ("ld1 {v16.16b - v17.16b}, %2\n\t"
"movi %0.8b, 24\n\t"
"cmhs %0.8b, %3.8b, %0.8b\n\t"
/* vqdmlal_lane_s16: signed saturating doubling multiply-accumulate long,
   by lane — __a + saturate (2 * __b * __c[__d]) per the ACLE contract.
   __d must be a constant lane index.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlal_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
  /* The lane builtin operates on a 128-bit vector: place __c in the low
     half of a Q register and zero the high half.  */
  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmlal_lanev4hi (__a, __b, __tmp, __d);
}
/* vqdmlal_lane_s32: signed saturating doubling multiply-accumulate long,
   by lane — __a + saturate (2 * __b * __c[__d]) per the ACLE contract.
   __d must be a constant lane index.  */
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlal_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
  /* The lane builtin operates on a 128-bit vector: place __c in the low
     half of a Q register and zero the high half.  */
  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmlal_lanev2si (__a, __b, __tmp, __d);
}
/* vqdmlsl_lane_s16: signed saturating doubling multiply-subtract long,
   by lane — __a - saturate (2 * __b * __c[__d]) per the ACLE contract.
   __d must be a constant lane index.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmlsl_lane_s16 (int32x4_t __a, int16x4_t __b, int16x4_t __c, int const __d)
{
  /* The lane builtin operates on a 128-bit vector: place __c in the low
     half of a Q register and zero the high half.  */
  int16x8_t __tmp = vcombine_s16 (__c, vcreate_s16 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmlsl_lanev4hi (__a, __b, __tmp, __d);
}
/* vqdmlsl_lane_s32: signed saturating doubling multiply-subtract long,
   by lane — __a - saturate (2 * __b * __c[__d]) per the ACLE contract.
   __d must be a constant lane index.  */
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmlsl_lane_s32 (int64x2_t __a, int32x2_t __b, int32x2_t __c, int const __d)
{
  /* The lane builtin operates on a 128-bit vector: place __c in the low
     half of a Q register and zero the high half.  */
  int32x4_t __tmp = vcombine_s32 (__c, vcreate_s32 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmlsl_lanev2si (__a, __b, __tmp, __d);
}
/* vqdmull_lane_s16: signed saturating doubling multiply long, by lane —
   saturate (2 * __a * __b[__c]) per the ACLE contract.  __c must be a
   constant lane index.  */
__extension__ static __inline int32x4_t __attribute__ ((__always_inline__))
vqdmull_lane_s16 (int16x4_t __a, int16x4_t __b, int const __c)
{
  /* The lane builtin operates on a 128-bit vector: place __b in the low
     half of a Q register and zero the high half.  */
  int16x8_t __tmp = vcombine_s16 (__b, vcreate_s16 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmull_lanev4hi (__a, __tmp, __c);
}
/* vqdmull_lane_s32: signed saturating doubling multiply long, by lane —
   saturate (2 * __a * __b[__c]) per the ACLE contract.  __c must be a
   constant lane index.  */
__extension__ static __inline int64x2_t __attribute__ ((__always_inline__))
vqdmull_lane_s32 (int32x2_t __a, int32x2_t __b, int const __c)
{
  /* The lane builtin operates on a 128-bit vector: place __b in the low
     half of a Q register and zero the high half.  */
  int32x4_t __tmp = vcombine_s32 (__b, vcreate_s32 (__AARCH64_INT64_C (0)));
  return __builtin_aarch64_sqdmull_lanev2si (__a, __tmp, __c);
}
{
__builtin_aarch64_simd_oi __o;
int64x2x2_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
__builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
uint64x2x2_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv2di (__o, (int64x2_t) temp.val[1], 1);
__builtin_aarch64_st2di ((__builtin_aarch64_simd_di *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
float64x2x2_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv2df (__o, (float64x2_t) temp.val[1], 1);
__builtin_aarch64_st2df ((__builtin_aarch64_simd_df *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
int8x16x2_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
poly8x16x2_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
int16x8x2_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
poly16x8x2_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
int32x4x2_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
__builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
uint8x16x2_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv16qi (__o, (int8x16_t) temp.val[1], 1);
__builtin_aarch64_st2v8qi ((__builtin_aarch64_simd_qi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
uint16x8x2_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv8hi (__o, (int16x8_t) temp.val[1], 1);
__builtin_aarch64_st2v4hi ((__builtin_aarch64_simd_hi *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
uint32x4x2_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv4si (__o, (int32x4_t) temp.val[1], 1);
__builtin_aarch64_st2v2si ((__builtin_aarch64_simd_si *) __a, __o);
{
__builtin_aarch64_simd_oi __o;
float32x4x2_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregoiv4sf (__o, (float32x4_t) temp.val[1], 1);
__builtin_aarch64_st2v2sf ((__builtin_aarch64_simd_sf *) __a, __o);
{
__builtin_aarch64_simd_ci __o;
int64x2x3_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
- temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
uint64x2x3_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
- temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv2di (__o, (int64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
float64x2x3_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
- temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv2df (__o, (float64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
int8x16x3_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
- temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
poly8x16x3_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
- temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
int16x8x3_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
- temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
poly16x8x3_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
- temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
int32x4x3_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
- temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
uint8x16x3_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
- temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
uint16x8x3_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
- temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
uint32x4x3_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
- temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv4si (__o, (int32x4_t) temp.val[2], 2);
{
__builtin_aarch64_simd_ci __o;
float32x4x3_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
- temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregciv4sf (__o, (float32x4_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
int64x2x4_t temp;
- temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (INT64_C (0)));
- temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (INT64_C (0)));
- temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (INT64_C (0)));
- temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (INT64_C (0)));
+ temp.val[0] = vcombine_s64 (val.val[0], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s64 (val.val[1], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s64 (val.val[2], vcreate_s64 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s64 (val.val[3], vcreate_s64 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
uint64x2x4_t temp;
- temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (UINT64_C (0)));
- temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (UINT64_C (0)));
- temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (UINT64_C (0)));
- temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (UINT64_C (0)));
+ temp.val[0] = vcombine_u64 (val.val[0], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u64 (val.val[1], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u64 (val.val[2], vcreate_u64 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u64 (val.val[3], vcreate_u64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv2di (__o, (int64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
float64x2x4_t temp;
- temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (UINT64_C (0)));
- temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (UINT64_C (0)));
- temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (UINT64_C (0)));
- temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (UINT64_C (0)));
+ temp.val[0] = vcombine_f64 (val.val[0], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f64 (val.val[1], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f64 (val.val[2], vcreate_f64 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_f64 (val.val[3], vcreate_f64 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv2df (__o, (float64x2_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
int8x16x4_t temp;
- temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (INT64_C (0)));
- temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (INT64_C (0)));
- temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (INT64_C (0)));
- temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (INT64_C (0)));
+ temp.val[0] = vcombine_s8 (val.val[0], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s8 (val.val[1], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s8 (val.val[2], vcreate_s8 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s8 (val.val[3], vcreate_s8 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
poly8x16x4_t temp;
- temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (UINT64_C (0)));
- temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (UINT64_C (0)));
- temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (UINT64_C (0)));
- temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (UINT64_C (0)));
+ temp.val[0] = vcombine_p8 (val.val[0], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p8 (val.val[1], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p8 (val.val[2], vcreate_p8 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_p8 (val.val[3], vcreate_p8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
int16x8x4_t temp;
- temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (INT64_C (0)));
- temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (INT64_C (0)));
- temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (INT64_C (0)));
- temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (INT64_C (0)));
+ temp.val[0] = vcombine_s16 (val.val[0], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s16 (val.val[1], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s16 (val.val[2], vcreate_s16 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s16 (val.val[3], vcreate_s16 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
poly16x8x4_t temp;
- temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (UINT64_C (0)));
- temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (UINT64_C (0)));
- temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (UINT64_C (0)));
- temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (UINT64_C (0)));
+ temp.val[0] = vcombine_p16 (val.val[0], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_p16 (val.val[1], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_p16 (val.val[2], vcreate_p16 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_p16 (val.val[3], vcreate_p16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
int32x4x4_t temp;
- temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (INT64_C (0)));
- temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (INT64_C (0)));
- temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (INT64_C (0)));
- temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (INT64_C (0)));
+ temp.val[0] = vcombine_s32 (val.val[0], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[1] = vcombine_s32 (val.val[1], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[2] = vcombine_s32 (val.val[2], vcreate_s32 (__AARCH64_INT64_C (0)));
+ temp.val[3] = vcombine_s32 (val.val[3], vcreate_s32 (__AARCH64_INT64_C (0)));
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
uint8x16x4_t temp;
- temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (UINT64_C (0)));
- temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (UINT64_C (0)));
- temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (UINT64_C (0)));
- temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (UINT64_C (0)));
+ temp.val[0] = vcombine_u8 (val.val[0], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u8 (val.val[1], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u8 (val.val[2], vcreate_u8 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u8 (val.val[3], vcreate_u8 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv16qi (__o, (int8x16_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
uint16x8x4_t temp;
- temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (UINT64_C (0)));
- temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (UINT64_C (0)));
- temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (UINT64_C (0)));
- temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (UINT64_C (0)));
+ temp.val[0] = vcombine_u16 (val.val[0], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u16 (val.val[1], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u16 (val.val[2], vcreate_u16 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u16 (val.val[3], vcreate_u16 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv8hi (__o, (int16x8_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
uint32x4x4_t temp;
- temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (UINT64_C (0)));
- temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (UINT64_C (0)));
- temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (UINT64_C (0)));
- temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (UINT64_C (0)));
+ temp.val[0] = vcombine_u32 (val.val[0], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_u32 (val.val[1], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_u32 (val.val[2], vcreate_u32 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_u32 (val.val[3], vcreate_u32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv4si (__o, (int32x4_t) temp.val[2], 2);
{
__builtin_aarch64_simd_xi __o;
float32x4x4_t temp;
- temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (UINT64_C (0)));
- temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (UINT64_C (0)));
- temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (UINT64_C (0)));
- temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (UINT64_C (0)));
+ temp.val[0] = vcombine_f32 (val.val[0], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[1] = vcombine_f32 (val.val[1], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[2] = vcombine_f32 (val.val[2], vcreate_f32 (__AARCH64_UINT64_C (0)));
+ temp.val[3] = vcombine_f32 (val.val[3], vcreate_f32 (__AARCH64_UINT64_C (0)));
__o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[0], 0);
__o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[1], 1);
__o = __builtin_aarch64_set_qregxiv4sf (__o, (float32x4_t) temp.val[2], 2);