From 6bc67182b6500b942674d6031c1bf0f02c779cbd Mon Sep 17 00:00:00 2001
From: Richard Sandiford
Date: Tue, 29 Oct 2019 08:41:52 +0000
Subject: [PATCH] [AArch64] Handle scalars in cmp and shift immediate queries

The SVE ACLE has convenience functions that take scalar arguments
instead of vectors.  This patch makes it easier to implement the shift
and compare functions by making the associated immediate queries work
for scalar immediates as well as vector duplicates of them.

The "const" codes in the predicates were a holdover from an early
version of the SVE port in which we used (const ...) wrappers for
variable-length vector constants.  I'll remove other instances of them
in a separate patch.

2019-10-29  Richard Sandiford

gcc/
	* config/aarch64/aarch64.c (aarch64_sve_cmp_immediate_p)
	(aarch64_simd_shift_imm_p): Accept scalars as well as vectors.
	* config/aarch64/predicates.md (aarch64_sve_cmp_vsc_immediate)
	(aarch64_sve_cmp_vsd_immediate): Accept "const_int", but don't
	accept "const".

From-SVN: r277556
---
 gcc/ChangeLog                    |  8 ++++++++
 gcc/config/aarch64/aarch64.c     | 17 +++++++++--------
 gcc/config/aarch64/predicates.md |  4 ++--
 3 files changed, 19 insertions(+), 10 deletions(-)

diff --git a/gcc/ChangeLog b/gcc/ChangeLog
index 4bb1f91..a85c2ae 100644
--- a/gcc/ChangeLog
+++ b/gcc/ChangeLog
@@ -1,5 +1,13 @@
 2019-10-29  Richard Sandiford
 
+	* config/aarch64/aarch64.c (aarch64_sve_cmp_immediate_p)
+	(aarch64_simd_shift_imm_p): Accept scalars as well as vectors.
+	* config/aarch64/predicates.md (aarch64_sve_cmp_vsc_immediate)
+	(aarch64_sve_cmp_vsd_immediate): Accept "const_int", but don't
+	accept "const".
+
+2019-10-29  Richard Sandiford
+
 	* coretypes.h (string_int_pair): New typedef.
 	* langhooks-def.h (LANG_HOOKS_SIMULATE_ENUM_DECL): Define.
 	(LANG_HOOKS_FOR_TYPES_INITIALIZER): Include it.
diff --git a/gcc/config/aarch64/aarch64.c b/gcc/config/aarch64/aarch64.c
index e439615..a19494e 100644
--- a/gcc/config/aarch64/aarch64.c
+++ b/gcc/config/aarch64/aarch64.c
@@ -15426,13 +15426,11 @@ aarch64_sve_dup_immediate_p (rtx x)
 bool
 aarch64_sve_cmp_immediate_p (rtx x, bool signed_p)
 {
-  rtx elt;
-
-  return (const_vec_duplicate_p (x, &elt)
-	  && CONST_INT_P (elt)
+  x = unwrap_const_vec_duplicate (x);
+  return (CONST_INT_P (x)
 	  && (signed_p
-	      ? IN_RANGE (INTVAL (elt), -16, 15)
-	      : IN_RANGE (INTVAL (elt), 0, 127)));
+	      ? IN_RANGE (INTVAL (x), -16, 15)
+	      : IN_RANGE (INTVAL (x), 0, 127)));
 }
 
 /* Return true if X is a valid immediate operand for an SVE FADD or FSUB
@@ -15784,11 +15782,14 @@ aarch64_check_zero_based_sve_index_immediate (rtx x)
 bool
 aarch64_simd_shift_imm_p (rtx x, machine_mode mode, bool left)
 {
+  x = unwrap_const_vec_duplicate (x);
+  if (!CONST_INT_P (x))
+    return false;
   int bit_width = GET_MODE_UNIT_SIZE (mode) * BITS_PER_UNIT;
   if (left)
-    return aarch64_const_vec_all_same_in_range_p (x, 0, bit_width - 1);
+    return IN_RANGE (INTVAL (x), 0, bit_width - 1);
   else
-    return aarch64_const_vec_all_same_in_range_p (x, 1, bit_width);
+    return IN_RANGE (INTVAL (x), 1, bit_width);
 }
 
 /* Return the bitmask CONST_INT to select the bits required by a zero extract
diff --git a/gcc/config/aarch64/predicates.md b/gcc/config/aarch64/predicates.md
index d8c3779..2b9aa4a 100644
--- a/gcc/config/aarch64/predicates.md
+++ b/gcc/config/aarch64/predicates.md
@@ -661,11 +661,11 @@
 	    (match_test "aarch64_float_const_representable_p (op)"))))
 
 (define_predicate "aarch64_sve_cmp_vsc_immediate"
-  (and (match_code "const,const_vector")
+  (and (match_code "const_int,const_vector")
       (match_test "aarch64_sve_cmp_immediate_p (op, true)")))
 
 (define_predicate "aarch64_sve_cmp_vsd_immediate"
-  (and (match_code "const,const_vector")
+  (and (match_code "const_int,const_vector")
      (match_test "aarch64_sve_cmp_immediate_p (op, false)")))
 
 (define_predicate "aarch64_sve_index_immediate"
-- 
2.7.4
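
[Not part of the patch: a minimal usage sketch, assuming an SVE-enabled GCC and the ACLE header <arm_sve.h>.  The "_n" intrinsic forms below are the kind of scalar-argument convenience function the commit message refers to; the constants used sit inside the ranges that aarch64_sve_cmp_immediate_p and aarch64_simd_shift_imm_p now check for scalars as well as vector duplicates.  The function names are illustrative only, and whether the immediate instruction forms are actually selected depends on the rest of the backend.]

/* Illustrative sketch only -- not part of the patch.
   Compile with e.g. aarch64-linux-gnu-gcc -O2 -march=armv8.2-a+sve -c.  */
#include <arm_sve.h>

/* 15 lies in the signed compare-immediate range (-16..15), so the
   comparison can use an immediate rather than a duplicated vector.  */
svbool_t
cmp_lt_15 (svbool_t pg, svint32_t x)
{
  return svcmplt_n_s32 (pg, x, 15);
}

/* Right-shift immediates must be in [1, element width]; 3 is within
   [1, 32] for 32-bit elements.  */
svint32_t
asr_by_3 (svbool_t pg, svint32_t x)
{
  return svasr_n_s32_x (pg, x, 3);
}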