From 5726d3760b3b0c939a3ef3001f2cc1d86c08eac7 Mon Sep 17 00:00:00 2001 From: Alan Lawrence Date: Fri, 5 Sep 2014 11:09:28 +0000 Subject: [PATCH] [PATCH AArch64 2/2] Remove vector compare/tst __builtins * config/aarch64/aarch64-builtins.c (aarch64_fold_builtin): Remove code handling cmge, cmgt, cmeq, cmtst. * config/aarch64/aarch64-simd-builtins.def (cmeq, cmge, cmgt, cmle, cmlt, cmgeu, cmgtu, cmtst): Remove. * config/aarch64/arm_neon.h (vceq_*, vceqq_*, vceqz_*, vceqzq_*, vcge_*, vcgeq_*, vcgez_*, vcgezq_*, vcgt_*, vcgtq_*, vcgtz_*, vcgtzq_*, vcle_*, vcleq_*, vclez_*, vclezq_*, vclt_*, vcltq_*, vcltz_*, vcltzq_*, vtst_*, vtstq_*): Use gcc vector extensions. From-SVN: r214949 --- gcc/ChangeLog | 13 + gcc/config/aarch64/aarch64-builtins.c | 16 - gcc/config/aarch64/aarch64-simd-builtins.def | 11 - gcc/config/aarch64/arm_neon.h | 439 ++++++++++----------------- 4 files changed, 173 insertions(+), 306 deletions(-) diff --git a/gcc/ChangeLog b/gcc/ChangeLog index 4be40e9..853832b 100644 --- a/gcc/ChangeLog +++ b/gcc/ChangeLog @@ -1,5 +1,18 @@ 2014-09-05 Alan Lawrence + * config/aarch64/aarch64-builtins.c (aarch64_fold_builtin): Remove code + handling cmge, cmgt, cmeq, cmtst. + + * config/aarch64/aarch64-simd-builtins.def (cmeq, cmge, cmgt, cmle, + cmlt, cmgeu, cmgtu, cmtst): Remove. + + * config/aarch64/arm_neon.h (vceq_*, vceqq_*, vceqz_*, vceqzq_*, + vcge_*, vcgeq_*, vcgez_*, vcgezq_*, vcgt_*, vcgtq_*, vcgtz_*, + vcgtzq_*, vcle_*, vcleq_*, vclez_*, vclezq_*, vclt_*, vcltq_*, + vcltz_*, vcltzq_*, vtst_*, vtstq_*): Use gcc vector extensions. + +2014-09-05 Alan Lawrence + * config/aarch64/aarch64-builtins.c (aarch64_types_cmtst_qualifiers, TYPES_TST): Define. (aarch64_fold_builtin): Update pattern for cmtst. diff --git a/gcc/config/aarch64/aarch64-builtins.c b/gcc/config/aarch64/aarch64-builtins.c index 32e902d..42a1e85 100644 --- a/gcc/config/aarch64/aarch64-builtins.c +++ b/gcc/config/aarch64/aarch64-builtins.c @@ -1281,22 +1281,6 @@ aarch64_fold_builtin (tree fndecl, int n_args ATTRIBUTE_UNUSED, tree *args, BUILTIN_VALLDI (UNOP, abs, 2) return fold_build1 (ABS_EXPR, type, args[0]); break; - BUILTIN_VALLDI (BINOP, cmge, 0) - return fold_build2 (GE_EXPR, type, args[0], args[1]); - break; - BUILTIN_VALLDI (BINOP, cmgt, 0) - return fold_build2 (GT_EXPR, type, args[0], args[1]); - break; - BUILTIN_VALLDI (BINOP, cmeq, 0) - return fold_build2 (EQ_EXPR, type, args[0], args[1]); - break; - BUILTIN_VSDQ_I_DI (TST, cmtst, 0) - { - tree and_node = fold_build2 (BIT_AND_EXPR, type, args[0], args[1]); - tree vec_zero_node = build_zero_cst (type); - return fold_build2 (NE_EXPR, type, and_node, vec_zero_node); - break; - } VAR1 (REINTERP_SS, reinterpretdi, 0, v1df) VAR1 (REINTERP_SS, reinterpretv8qi, 0, v1df) VAR1 (REINTERP_SS, reinterpretv4hi, 0, v1df) diff --git a/gcc/config/aarch64/aarch64-simd-builtins.def b/gcc/config/aarch64/aarch64-simd-builtins.def index 9bde1ac..67f05d9 100644 --- a/gcc/config/aarch64/aarch64-simd-builtins.def +++ b/gcc/config/aarch64/aarch64-simd-builtins.def @@ -237,17 +237,6 @@ BUILTIN_VSDQ_I (SHIFTIMM, sqshl_n, 0) BUILTIN_VSDQ_I (USHIFTIMM, uqshl_n, 0) - /* Implemented by aarch64_cm. */ - BUILTIN_VALLDI (BINOP, cmeq, 0) - BUILTIN_VALLDI (BINOP, cmge, 0) - BUILTIN_VALLDI (BINOP, cmgt, 0) - BUILTIN_VALLDI (BINOP, cmle, 0) - BUILTIN_VALLDI (BINOP, cmlt, 0) - /* Implemented by aarch64_cm. */ - BUILTIN_VSDQ_I_DI (BINOP, cmgeu, 0) - BUILTIN_VSDQ_I_DI (BINOP, cmgtu, 0) - BUILTIN_VSDQ_I_DI (TST, cmtst, 0) - /* Implemented by reduc_plus_. 
*/ BUILTIN_VALL (UNOP, reduc_splus_, 10) BUILTIN_VDQ (UNOP, reduc_uplus_, 10) diff --git a/gcc/config/aarch64/arm_neon.h b/gcc/config/aarch64/arm_neon.h index 86926b9..b33dc5c 100644 --- a/gcc/config/aarch64/arm_neon.h +++ b/gcc/config/aarch64/arm_neon.h @@ -13865,7 +13865,7 @@ vcalts_f32 (float32_t __a, float32_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_f32 (float32x2_t __a, float32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmeqv2sf (__a, __b); + return (uint32x2_t) (__a == __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -13877,26 +13877,25 @@ vceq_f64 (float64x1_t __a, float64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_p8 (poly8x8_t __a, poly8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (uint8x8_t) (__a == __b); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b); + return (uint8x8_t) (__a == __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceq_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b); + return (uint16x4_t) (__a == __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b); + return (uint32x2_t) (__a == __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -13908,22 +13907,19 @@ vceq_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceq_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (__a == __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceq_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a, - (int16x4_t) __b); + return (__a == __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceq_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a, - (int32x2_t) __b); + return (__a == __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -13935,72 +13931,67 @@ vceq_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_f32 (float32x4_t __a, float32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmeqv4sf (__a, __b); + return (uint32x4_t) (__a == __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_f64 (float64x2_t __a, float64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmeqv2df (__a, __b); + return (uint64x2_t) (__a == __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_p8 (poly8x16_t __a, poly8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a, - (int8x16_t) __b); + return (uint8x16_t) (__a == __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b); + return (uint8x16_t) (__a == __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqq_s16 
(int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b); + return (uint16x8_t) (__a == __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b); + return (uint32x4_t) (__a == __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b); + return (uint64x2_t) (__a == __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a, - (int8x16_t) __b); + return (__a == __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a, - (int16x8_t) __b); + return (__a == __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a, - (int32x4_t) __b); + return (__a == __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqq_u64 (uint64x2_t __a, uint64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a, - (int64x2_t) __b); + return (__a == __b); } /* vceq - scalar. */ @@ -14034,8 +14025,7 @@ vceqd_f64 (float64_t __a, float64_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_f32 (float32x2_t __a) { - float32x2_t __b = {0.0f, 0.0f}; - return (uint32x2_t) __builtin_aarch64_cmeqv2sf (__a, __b); + return (uint32x2_t) (__a == 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14047,30 +14037,25 @@ vceqz_f64 (float64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_p8 (poly8x8_t __a) { - poly8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (uint8x8_t) (__a == 0); } __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_s8 (int8x8_t __a) { - int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmeqv8qi (__a, __b); + return (uint8x8_t) (__a == 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceqz_s16 (int16x4_t __a) { - int16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmeqv4hi (__a, __b); + return (uint16x4_t) (__a == 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_s32 (int32x2_t __a) { - int32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmeqv2si (__a, __b); + return (uint32x2_t) (__a == 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14082,25 +14067,19 @@ vceqz_s64 (int64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vceqz_u8 (uint8x8_t __a) { - uint8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmeqv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (__a == 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vceqz_u16 (uint16x4_t __a) { - uint16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmeqv4hi ((int16x4_t) __a, - (int16x4_t) __b); + return (__a 
== 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vceqz_u32 (uint32x2_t __a) { - uint32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmeqv2si ((int32x2_t) __a, - (int32x2_t) __b); + return (__a == 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14112,86 +14091,67 @@ vceqz_u64 (uint64x1_t __a) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_f32 (float32x4_t __a) { - float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f}; - return (uint32x4_t) __builtin_aarch64_cmeqv4sf (__a, __b); + return (uint32x4_t) (__a == 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_f64 (float64x2_t __a) { - float64x2_t __b = {0.0, 0.0}; - return (uint64x2_t) __builtin_aarch64_cmeqv2df (__a, __b); + return (uint64x2_t) (__a == 0.0f); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_p8 (poly8x16_t __a) { - poly8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a, - (int8x16_t) __b); + return (uint8x16_t) (__a == 0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_s8 (int8x16_t __a) { - int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmeqv16qi (__a, __b); + return (uint8x16_t) (__a == 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqzq_s16 (int16x8_t __a) { - int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmeqv8hi (__a, __b); + return (uint16x8_t) (__a == 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_s32 (int32x4_t __a) { - int32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmeqv4si (__a, __b); + return (uint32x4_t) (__a == 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_s64 (int64x2_t __a) { - int64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmeqv2di (__a, __b); + return (uint64x2_t) (__a == __AARCH64_INT64_C (0)); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vceqzq_u8 (uint8x16_t __a) { - uint8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmeqv16qi ((int8x16_t) __a, - (int8x16_t) __b); + return (__a == 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vceqzq_u16 (uint16x8_t __a) { - uint16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmeqv8hi ((int16x8_t) __a, - (int16x8_t) __b); + return (__a == 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vceqzq_u32 (uint32x4_t __a) { - uint32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmeqv4si ((int32x4_t) __a, - (int32x4_t) __b); + return (__a == 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vceqzq_u64 (uint64x2_t __a) { - uint64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmeqv2di ((int64x2_t) __a, - (int64x2_t) __b); + return (__a == __AARCH64_UINT64_C (0)); } /* vceqz - scalar. 
*/ @@ -14225,7 +14185,7 @@ vceqzd_f64 (float64_t __a) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_f32 (float32x2_t __a, float32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgev2sf (__a, __b); + return (uint32x2_t) (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14237,19 +14197,19 @@ vcge_f64 (float64x1_t __a, float64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcge_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b); + return (uint8x8_t) (__a >= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcge_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b); + return (uint16x4_t) (__a >= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b); + return (uint32x2_t) (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14261,22 +14221,19 @@ vcge_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcge_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgeuv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (__a >= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcge_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgeuv4hi ((int16x4_t) __a, - (int16x4_t) __b); + return (__a >= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcge_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgeuv2si ((int32x2_t) __a, - (int32x2_t) __b); + return (__a >= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14288,65 +14245,61 @@ vcge_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_f32 (float32x4_t __a, float32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgev4sf (__a, __b); + return (uint32x4_t) (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_f64 (float64x2_t __a, float64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgev2df (__a, __b); + return (uint64x2_t) (__a >= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgeq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b); + return (uint8x16_t) (__a >= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgeq_s16 (int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b); + return (uint16x8_t) (__a >= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b); + return (uint32x4_t) (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b); + return (uint64x2_t) (__a >= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgeq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgeuv16qi 
((int8x16_t) __a, - (int8x16_t) __b); + return (__a >= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgeq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgeuv8hi ((int16x8_t) __a, - (int16x8_t) __b); + return (__a >= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgeq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgeuv4si ((int32x4_t) __a, - (int32x4_t) __b); + return (__a >= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgeq_u64 (uint64x2_t __a, uint64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgeuv2di ((int64x2_t) __a, - (int64x2_t) __b); + return (__a >= __b); } /* vcge - scalar. */ @@ -14380,8 +14333,7 @@ vcged_f64 (float64_t __a, float64_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgez_f32 (float32x2_t __a) { - float32x2_t __b = {0.0f, 0.0f}; - return (uint32x2_t) __builtin_aarch64_cmgev2sf (__a, __b); + return (uint32x2_t) (__a >= 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14393,22 +14345,19 @@ vcgez_f64 (float64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgez_s8 (int8x8_t __a) { - int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmgev8qi (__a, __b); + return (uint8x8_t) (__a >= 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgez_s16 (int16x4_t __a) { - int16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmgev4hi (__a, __b); + return (uint16x4_t) (__a >= 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgez_s32 (int32x2_t __a) { - int32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmgev2si (__a, __b); + return (uint32x2_t) (__a >= 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14420,44 +14369,37 @@ vcgez_s64 (int64x1_t __a) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgezq_f32 (float32x4_t __a) { - float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f}; - return (uint32x4_t) __builtin_aarch64_cmgev4sf (__a, __b); + return (uint32x4_t) (__a >= 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgezq_f64 (float64x2_t __a) { - float64x2_t __b = {0.0, 0.0}; - return (uint64x2_t) __builtin_aarch64_cmgev2df (__a, __b); + return (uint64x2_t) (__a >= 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgezq_s8 (int8x16_t __a) { - int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmgev16qi (__a, __b); + return (uint8x16_t) (__a >= 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgezq_s16 (int16x8_t __a) { - int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmgev8hi (__a, __b); + return (uint16x8_t) (__a >= 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgezq_s32 (int32x4_t __a) { - int32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmgev4si (__a, __b); + return (uint32x4_t) (__a >= 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgezq_s64 (int64x2_t __a) { - int64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmgev2di (__a, __b); + return (uint64x2_t) (__a >= __AARCH64_INT64_C (0)); } 
/* vcgez - scalar. */ @@ -14485,7 +14427,7 @@ vcgezd_f64 (float64_t __a) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_f32 (float32x2_t __a, float32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__a, __b); + return (uint32x2_t) (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14497,19 +14439,19 @@ vcgt_f64 (float64x1_t __a, float64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgt_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b); + return (uint8x8_t) (__a > __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgt_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b); + return (uint16x4_t) (__a > __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b); + return (uint32x2_t) (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14521,22 +14463,19 @@ vcgt_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgt_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgtuv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return (__a > __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgt_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgtuv4hi ((int16x4_t) __a, - (int16x4_t) __b); + return (__a > __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgt_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtuv2si ((int32x2_t) __a, - (int32x2_t) __b); + return (__a > __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14548,65 +14487,61 @@ vcgt_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_f32 (float32x4_t __a, float32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__a, __b); + return (uint32x4_t) (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_f64 (float64x2_t __a, float64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtv2df (__a, __b); + return (uint64x2_t) (__a > __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b); + return (uint8x16_t) (__a > __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtq_s16 (int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b); + return (uint16x8_t) (__a > __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b); + return (uint32x4_t) (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b); + return (uint64x2_t) (__a > __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgtuv16qi 
((int8x16_t) __a, - (int8x16_t) __b); + return (__a > __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgtuv8hi ((int16x8_t) __a, - (int16x8_t) __b); + return (__a > __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtuv4si ((int32x4_t) __a, - (int32x4_t) __b); + return (__a > __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtq_u64 (uint64x2_t __a, uint64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtuv2di ((int64x2_t) __a, - (int64x2_t) __b); + return (__a > __b); } /* vcgt - scalar. */ @@ -14640,8 +14575,7 @@ vcgtd_f64 (float64_t __a, float64_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgtz_f32 (float32x2_t __a) { - float32x2_t __b = {0.0f, 0.0f}; - return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__a, __b); + return (uint32x2_t) (__a > 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14653,22 +14587,19 @@ vcgtz_f64 (float64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcgtz_s8 (int8x8_t __a) { - int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__a, __b); + return (uint8x8_t) (__a > 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcgtz_s16 (int16x4_t __a) { - int16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__a, __b); + return (uint16x4_t) (__a > 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcgtz_s32 (int32x2_t __a) { - int32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmgtv2si (__a, __b); + return (uint32x2_t) (__a > 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14680,44 +14611,37 @@ vcgtz_s64 (int64x1_t __a) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtzq_f32 (float32x4_t __a) { - float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f}; - return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__a, __b); + return (uint32x4_t) (__a > 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtzq_f64 (float64x2_t __a) { - float64x2_t __b = {0.0, 0.0}; - return (uint64x2_t) __builtin_aarch64_cmgtv2df (__a, __b); + return (uint64x2_t) (__a > 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcgtzq_s8 (int8x16_t __a) { - int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__a, __b); + return (uint8x16_t) (__a > 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcgtzq_s16 (int16x8_t __a) { - int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__a, __b); + return (uint16x8_t) (__a > 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcgtzq_s32 (int32x4_t __a) { - int32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmgtv4si (__a, __b); + return (uint32x4_t) (__a > 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcgtzq_s64 (int64x2_t __a) { - int64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmgtv2di (__a, __b); + return (uint64x2_t) (__a > __AARCH64_INT64_C (0)); } /* vcgtz - 
scalar. */ @@ -14745,7 +14669,7 @@ vcgtzd_f64 (float64_t __a) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_f32 (float32x2_t __a, float32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgev2sf (__b, __a); + return (uint32x2_t) (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14757,19 +14681,19 @@ vcle_f64 (float64x1_t __a, float64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcle_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgev8qi (__b, __a); + return (uint8x8_t) (__a <= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcle_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgev4hi (__b, __a); + return (uint16x4_t) (__a <= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgev2si (__b, __a); + return (uint32x2_t) (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14781,22 +14705,19 @@ vcle_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcle_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgeuv8qi ((int8x8_t) __b, - (int8x8_t) __a); + return (__a <= __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcle_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgeuv4hi ((int16x4_t) __b, - (int16x4_t) __a); + return (__a <= __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcle_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgeuv2si ((int32x2_t) __b, - (int32x2_t) __a); + return (__a <= __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14808,65 +14729,61 @@ vcle_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_f32 (float32x4_t __a, float32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgev4sf (__b, __a); + return (uint32x4_t) (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_f64 (float64x2_t __a, float64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgev2df (__b, __a); + return (uint64x2_t) (__a <= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcleq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgev16qi (__b, __a); + return (uint8x16_t) (__a <= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcleq_s16 (int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgev8hi (__b, __a); + return (uint16x8_t) (__a <= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgev4si (__b, __a); + return (uint32x4_t) (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgev2di (__b, __a); + return (uint64x2_t) (__a <= __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcleq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) 
__builtin_aarch64_cmgeuv16qi ((int8x16_t) __b, - (int8x16_t) __a); + return (__a <= __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcleq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgeuv8hi ((int16x8_t) __b, - (int16x8_t) __a); + return (__a <= __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcleq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgeuv4si ((int32x4_t) __b, - (int32x4_t) __a); + return (__a <= __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcleq_u64 (uint64x2_t __a, uint64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgeuv2di ((int64x2_t) __b, - (int64x2_t) __a); + return (__a <= __b); } /* vcle - scalar. */ @@ -14900,8 +14817,7 @@ vcled_f64 (float64_t __a, float64_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclez_f32 (float32x2_t __a) { - float32x2_t __b = {0.0f, 0.0f}; - return (uint32x2_t) __builtin_aarch64_cmlev2sf (__a, __b); + return (uint32x2_t) (__a <= 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14913,22 +14829,19 @@ vclez_f64 (float64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclez_s8 (int8x8_t __a) { - int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmlev8qi (__a, __b); + return (uint8x8_t) (__a <= 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclez_s16 (int16x4_t __a) { - int16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmlev4hi (__a, __b); + return (uint16x4_t) (__a <= 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclez_s32 (int32x2_t __a) { - int32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmlev2si (__a, __b); + return (uint32x2_t) (__a <= 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -14940,44 +14853,37 @@ vclez_s64 (int64x1_t __a) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vclezq_f32 (float32x4_t __a) { - float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f}; - return (uint32x4_t) __builtin_aarch64_cmlev4sf (__a, __b); + return (uint32x4_t) (__a <= 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vclezq_f64 (float64x2_t __a) { - float64x2_t __b = {0.0, 0.0}; - return (uint64x2_t) __builtin_aarch64_cmlev2df (__a, __b); + return (uint64x2_t) (__a <= 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vclezq_s8 (int8x16_t __a) { - int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmlev16qi (__a, __b); + return (uint8x16_t) (__a <= 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vclezq_s16 (int16x8_t __a) { - int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmlev8hi (__a, __b); + return (uint16x8_t) (__a <= 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vclezq_s32 (int32x4_t __a) { - int32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmlev4si (__a, __b); + return (uint32x4_t) (__a <= 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vclezq_s64 (int64x2_t __a) { - int64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmlev2di (__a, __b); + return (uint64x2_t) (__a 
<= __AARCH64_INT64_C (0)); } /* vclez - scalar. */ @@ -15005,7 +14911,7 @@ vclezd_f64 (float64_t __a) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_f32 (float32x2_t __a, float32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtv2sf (__b, __a); + return (uint32x2_t) (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -15017,19 +14923,19 @@ vclt_f64 (float64x1_t __a, float64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclt_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgtv8qi (__b, __a); + return (uint8x8_t) (__a < __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclt_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgtv4hi (__b, __a); + return (uint16x4_t) (__a < __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtv2si (__b, __a); + return (uint32x2_t) (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -15041,22 +14947,19 @@ vclt_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vclt_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmgtuv8qi ((int8x8_t) __b, - (int8x8_t) __a); + return (__a < __b); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vclt_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmgtuv4hi ((int16x4_t) __b, - (int16x4_t) __a); + return (__a < __b); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vclt_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmgtuv2si ((int32x2_t) __b, - (int32x2_t) __a); + return (__a < __b); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -15068,65 +14971,61 @@ vclt_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_f32 (float32x4_t __a, float32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtv4sf (__b, __a); + return (uint32x4_t) (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_f64 (float64x2_t __a, float64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtv2df (__b, __a); + return (uint64x2_t) (__a < __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmgtv16qi (__b, __a); + return (uint8x16_t) (__a < __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltq_s16 (int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgtv8hi (__b, __a); + return (uint16x8_t) (__a < __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtv4si (__b, __a); + return (uint32x4_t) (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtv2di (__b, __a); + return (uint64x2_t) (__a < __b); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) 
__builtin_aarch64_cmgtuv16qi ((int8x16_t) __b, - (int8x16_t) __a); + return (__a < __b); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmgtuv8hi ((int16x8_t) __b, - (int16x8_t) __a); + return (__a < __b); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmgtuv4si ((int32x4_t) __b, - (int32x4_t) __a); + return (__a < __b); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltq_u64 (uint64x2_t __a, uint64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmgtuv2di ((int64x2_t) __b, - (int64x2_t) __a); + return (__a < __b); } /* vclt - scalar. */ @@ -15160,8 +15059,7 @@ vcltd_f64 (float64_t __a, float64_t __b) __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcltz_f32 (float32x2_t __a) { - float32x2_t __b = {0.0f, 0.0f}; - return (uint32x2_t) __builtin_aarch64_cmltv2sf (__a, __b); + return (uint32x2_t) (__a < 0.0f); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -15173,22 +15071,19 @@ vcltz_f64 (float64x1_t __a) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vcltz_s8 (int8x8_t __a) { - int8x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x8_t) __builtin_aarch64_cmltv8qi (__a, __b); + return (uint8x8_t) (__a < 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vcltz_s16 (int16x4_t __a) { - int16x4_t __b = {0, 0, 0, 0}; - return (uint16x4_t) __builtin_aarch64_cmltv4hi (__a, __b); + return (uint16x4_t) (__a < 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vcltz_s32 (int32x2_t __a) { - int32x2_t __b = {0, 0}; - return (uint32x2_t) __builtin_aarch64_cmltv2si (__a, __b); + return (uint32x2_t) (__a < 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -15200,44 +15095,37 @@ vcltz_s64 (int64x1_t __a) __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltzq_f32 (float32x4_t __a) { - float32x4_t __b = {0.0f, 0.0f, 0.0f, 0.0f}; - return (uint32x4_t) __builtin_aarch64_cmltv4sf (__a, __b); + return (uint32x4_t) (__a < 0.0f); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltzq_f64 (float64x2_t __a) { - float64x2_t __b = {0.0, 0.0}; - return (uint64x2_t) __builtin_aarch64_cmltv2df (__a, __b); + return (uint64x2_t) (__a < 0.0); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vcltzq_s8 (int8x16_t __a) { - int8x16_t __b = {0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0}; - return (uint8x16_t) __builtin_aarch64_cmltv16qi (__a, __b); + return (uint8x16_t) (__a < 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vcltzq_s16 (int16x8_t __a) { - int16x8_t __b = {0, 0, 0, 0, 0, 0, 0, 0}; - return (uint16x8_t) __builtin_aarch64_cmltv8hi (__a, __b); + return (uint16x8_t) (__a < 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vcltzq_s32 (int32x4_t __a) { - int32x4_t __b = {0, 0, 0, 0}; - return (uint32x4_t) __builtin_aarch64_cmltv4si (__a, __b); + return (uint32x4_t) (__a < 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vcltzq_s64 (int64x2_t __a) { - int64x2_t __b = {0, 0}; - return (uint64x2_t) __builtin_aarch64_cmltv2di (__a, __b); + return (uint64x2_t) (__a < 
__AARCH64_INT64_C (0)); } /* vcltz - scalar. */ @@ -24139,19 +24027,19 @@ vtrnq_u32 (uint32x4_t a, uint32x4_t b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtst_s8 (int8x8_t __a, int8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmtstv8qi (__a, __b); + return (uint8x8_t) ((__a & __b) != 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtst_s16 (int16x4_t __a, int16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmtstv4hi (__a, __b); + return (uint16x4_t) ((__a & __b) != 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtst_s32 (int32x2_t __a, int32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmtstv2si (__a, __b); + return (uint32x2_t) ((__a & __b) != 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -24163,22 +24051,19 @@ vtst_s64 (int64x1_t __a, int64x1_t __b) __extension__ static __inline uint8x8_t __attribute__ ((__always_inline__)) vtst_u8 (uint8x8_t __a, uint8x8_t __b) { - return (uint8x8_t) __builtin_aarch64_cmtstv8qi ((int8x8_t) __a, - (int8x8_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint16x4_t __attribute__ ((__always_inline__)) vtst_u16 (uint16x4_t __a, uint16x4_t __b) { - return (uint16x4_t) __builtin_aarch64_cmtstv4hi ((int16x4_t) __a, - (int16x4_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint32x2_t __attribute__ ((__always_inline__)) vtst_u32 (uint32x2_t __a, uint32x2_t __b) { - return (uint32x2_t) __builtin_aarch64_cmtstv2si ((int32x2_t) __a, - (int32x2_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint64x1_t __attribute__ ((__always_inline__)) @@ -24190,53 +24075,49 @@ vtst_u64 (uint64x1_t __a, uint64x1_t __b) __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtstq_s8 (int8x16_t __a, int8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmtstv16qi (__a, __b); + return (uint8x16_t) ((__a & __b) != 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtstq_s16 (int16x8_t __a, int16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmtstv8hi (__a, __b); + return (uint16x8_t) ((__a & __b) != 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtstq_s32 (int32x4_t __a, int32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmtstv4si (__a, __b); + return (uint32x4_t) ((__a & __b) != 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtstq_s64 (int64x2_t __a, int64x2_t __b) { - return (uint64x2_t) __builtin_aarch64_cmtstv2di (__a, __b); + return (uint64x2_t) ((__a & __b) != __AARCH64_INT64_C (0)); } __extension__ static __inline uint8x16_t __attribute__ ((__always_inline__)) vtstq_u8 (uint8x16_t __a, uint8x16_t __b) { - return (uint8x16_t) __builtin_aarch64_cmtstv16qi ((int8x16_t) __a, - (int8x16_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint16x8_t __attribute__ ((__always_inline__)) vtstq_u16 (uint16x8_t __a, uint16x8_t __b) { - return (uint16x8_t) __builtin_aarch64_cmtstv8hi ((int16x8_t) __a, - (int16x8_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint32x4_t __attribute__ ((__always_inline__)) vtstq_u32 (uint32x4_t __a, uint32x4_t __b) { - return (uint32x4_t) __builtin_aarch64_cmtstv4si ((int32x4_t) __a, - (int32x4_t) __b); + return ((__a & __b) != 0); } __extension__ static __inline uint64x2_t __attribute__ ((__always_inline__)) vtstq_u64 (uint64x2_t __a, uint64x2_t __b) { - 
return (uint64x2_t) __builtin_aarch64_cmtstv2di ((int64x2_t) __a, - (int64x2_t) __b); + return ((__a & __b) != __AARCH64_UINT64_C (0)); } __extension__ static __inline uint64_t __attribute__ ((__always_inline__)) -- 2.7.4
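
All of the rewritten intrinsics follow the same pattern, so one standalone example illustrates the GCC vector-extension semantics they rely on. The snippet below is illustrative only and not part of the patch; it assumes a GCC toolchain targeting AArch64 (so that arm_neon.h is available) and uses made-up input values.

/* Illustrative sketch, not part of the patch.  A GCC vector-extension
   comparison (or the (a & b) != 0 form used for vtst) yields an integer
   vector of the same width whose lanes are all-ones where the condition
   holds and zero where it does not -- the same lane-wise mask the removed
   __builtin_aarch64_cm* builtins (CMEQ/CMGE/CMGT/CMTST) produced, so the
   cast to the unsigned result type preserves each intrinsic's behaviour.
   Assumes a GCC compiler targeting AArch64.  */

#include <arm_neon.h>
#include <stdio.h>

int
main (void)
{
  int32x2_t a = {1, -2};
  int32x2_t b = {1, 7};

  /* Same form as the new vceq_s32 body.  */
  uint32x2_t eq = (uint32x2_t) (a == b);

  /* Same form as the new vtst_s32 body.  */
  uint32x2_t tst = (uint32x2_t) ((a & b) != 0);

  /* Prints: eq = { ffffffff, 0 }  tst = { ffffffff, ffffffff }  */
  printf ("eq = { %x, %x }  tst = { %x, %x }\n",
          eq[0], eq[1], tst[0], tst[1]);
  return 0;
}

The explicit cast on each return is needed because GCC defines the result of a vector comparison as a signed integer vector; reinterpreting it as the intrinsic's unsigned return type is value-preserving since every lane is either all-ones or zero. Generating the comparison trees directly from the header also matches what the removed aarch64_fold_builtin code used to do (GE_EXPR, GT_EXPR, EQ_EXPR, and the BIT_AND_EXPR/NE_EXPR pair for cmtst), which is why that folding code and the builtin definitions can be deleted together.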