;;
;; [vshrq_n_s, vshrq_n_u])
;;
+;; Version that takes an immediate as operand 2.
(define_insn "mve_vshrq_n_<supf><mode>"
[
(set (match_operand:MVE_2 0 "s_register_operand" "=w")
[(set_attr "type" "mve_move")
])
+;; Versions that take constant vectors as operand 2 (with all elements
+;; equal).
+(define_insn "mve_vshrq_n_s<mode>_imm"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (ashiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
+ ]
+ "TARGET_HAVE_MVE"
+ {
+ return neon_output_shift_immediate ("vshr", 's', &operands[2],
+ <MODE>mode,
+ VALID_NEON_QREG_MODE (<MODE>mode),
+ true);
+ }
+ [(set_attr "type" "mve_move")
+])
+(define_insn "mve_vshrq_n_u<mode>_imm"
+ [
+ (set (match_operand:MVE_2 0 "s_register_operand" "=w")
+ (lshiftrt:MVE_2 (match_operand:MVE_2 1 "s_register_operand" "w")
+ (match_operand:MVE_2 2 "imm_for_neon_rshift_operand" "i")))
+ ]
+ "TARGET_HAVE_MVE"
+ {
+ return neon_output_shift_immediate ("vshr", 'u', &operands[2],
+ <MODE>mode,
+ VALID_NEON_QREG_MODE (<MODE>mode),
+ true);
+ }
+ [(set_attr "type" "mve_move")
+])
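+
+;; Illustrative note, not part of the original patch: these patterns are
+;; aimed at the auto-vectorizer, which expresses a shift by a constant as
+;; a shift by a constant vector with all elements equal (e.g.
+;; "dest[i] = a[i] >> 5" over int32_t becomes an ashiftrt:V4SI by
+;; {5, 5, 5, 5}); neon_output_shift_immediate is then expected to print
+;; an instruction along the lines of "vshr.s32 q0, q1, #5".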
+
;;
;; [vcvtq_n_from_f_s, vcvtq_n_from_f_u])
;;
[(set_attr "type" "neon_shift_reg<q>")]
)
-(define_expand "vashr<mode>3"
- [(set (match_operand:VDQIW 0 "s_register_operand")
- (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
- (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
- "TARGET_NEON"
-{
- if (s_register_operand (operands[2], <MODE>mode))
- {
- rtx neg = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_ashl<mode>3_signed (operands[0], operands[1], neg));
- }
- else
- emit_insn (gen_vashr<mode>3_imm (operands[0], operands[1], operands[2]));
- DONE;
-})
-
-(define_expand "vlshr<mode>3"
- [(set (match_operand:VDQIW 0 "s_register_operand")
- (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
- (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
- "TARGET_NEON"
-{
- if (s_register_operand (operands[2], <MODE>mode))
- {
- rtx neg = gen_reg_rtx (<MODE>mode);
- emit_insn (gen_neon_neg<mode>2 (neg, operands[2]));
- emit_insn (gen_ashl<mode>3_unsigned (operands[0], operands[1], neg));
- }
- else
- emit_insn (gen_vlshr<mode>3_imm (operands[0], operands[1], operands[2]));
- DONE;
-})
-
;; 64-bit shifts
;; This pattern loads a 32-bit shift count into a 64-bit NEON register,
{
emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], operands[2]));
DONE;
-})
\ No newline at end of file
+})
+
+;; When operand 2 is an immediate, use the normal expansion to match
+;; gen_vashr<mode>3_imm for Neon and gen_mve_vshrq_n_s<mode>_imm for
+;; MVE.
+(define_expand "vashr<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand")
+ (ashiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
+ (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
+ "ARM_HAVE_<MODE>_ARITH"
+{
+ if (s_register_operand (operands[2], <MODE>mode))
+ {
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_mve_vshlq_s<mode> (operands[0], operands[1], neg));
+ DONE;
+ }
+})
+
+;; When operand 2 is an immediate, use the normal expansion to match
+;; gen_vlshr<mode>3_imm for Neon and gen_mve_vshrq_n_u<mode>_imm for
+;; MVE.
+(define_expand "vlshr<mode>3"
+ [(set (match_operand:VDQIW 0 "s_register_operand")
+ (lshiftrt:VDQIW (match_operand:VDQIW 1 "s_register_operand")
+ (match_operand:VDQIW 2 "imm_rshift_or_reg_neon")))]
+ "ARM_HAVE_<MODE>_ARITH"
+{
+ if (s_register_operand (operands[2], <MODE>mode))
+ {
+ rtx neg = gen_reg_rtx (<MODE>mode);
+ emit_insn (gen_neg<mode>2 (neg, operands[2]));
+ emit_insn (gen_mve_vshlq_u<mode> (operands[0], operands[1], neg));
+ DONE;
+ }
+})
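+
+;; Illustrative sketch, not part of the original patch: neither Neon nor
+;; MVE has a vector right shift by a register, so the register branch
+;; above negates the shift counts and emits a left shift instead.  A loop
+;; such as
+;;   for (i = 0; i < 4; i++)
+;;     dest[i] = a[i] >> b[i];
+;; over int32_t arrays is expected to vectorize to a vneg of the counts
+;; followed by a vshl, while a constant count such as "a[i] >> 5" falls
+;; through to vashr<mode>3_imm / vlshr<mode>3_imm on Neon and to
+;; mve_vshrq_n_s<mode>_imm / mve_vshrq_n_u<mode>_imm on MVE.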
--- /dev/null
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_ok } */
+/* { dg-add-options arm_v8_1m_mve } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define FUNC(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a, TYPE##BITS##_t *b) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP b[i]; \
+ } \
+}
+
+#define FUNC_IMM(SIGN, TYPE, BITS, NB, OP, NAME) \
+ void test_ ## NAME ##_ ## SIGN ## BITS ## x ## NB (TYPE##BITS##_t * __restrict__ dest, TYPE##BITS##_t *a) { \
+ int i; \
+ for (i=0; i<NB; i++) { \
+ dest[i] = a[i] OP 5; \
+ } \
+}
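+
+/* For reference (not part of the original test): FUNC(s, int, 32, 2, >>, vshr)
+   expands, modulo whitespace, to
+     void test_vshr_s32x2 (int32_t * __restrict__ dest, int32_t *a, int32_t *b) {
+       int i;
+       for (i=0; i<2; i++) {
+         dest[i] = a[i] >> b[i];
+       }
+     }
+   and FUNC_IMM produces the same shape with the second input array
+   replaced by the constant 5.  */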
+
+/* 64-bit vectors. */
+FUNC(s, int, 32, 2, >>, vshr)
+FUNC(u, uint, 32, 2, >>, vshr)
+FUNC(s, int, 16, 4, >>, vshr)
+FUNC(u, uint, 16, 4, >>, vshr)
+FUNC(s, int, 8, 8, >>, vshr)
+FUNC(u, uint, 8, 8, >>, vshr)
+
+/* 128-bit vectors. */
+FUNC(s, int, 32, 4, >>, vshr)
+FUNC(u, uint, 32, 4, >>, vshr)
+FUNC(s, int, 16, 8, >>, vshr)
+FUNC(u, uint, 16, 8, >>, vshr)
+FUNC(s, int, 8, 16, >>, vshr)
+FUNC(u, uint, 8, 16, >>, vshr)
+
+/* 64-bit vectors. */
+FUNC_IMM(s, int, 32, 2, >>, vshrimm)
+FUNC_IMM(u, uint, 32, 2, >>, vshrimm)
+FUNC_IMM(s, int, 16, 4, >>, vshrimm)
+FUNC_IMM(u, uint, 16, 4, >>, vshrimm)
+FUNC_IMM(s, int, 8, 8, >>, vshrimm)
+FUNC_IMM(u, uint, 8, 8, >>, vshrimm)
+
+/* 128-bit vectors. */
+FUNC_IMM(s, int, 32, 4, >>, vshrimm)
+FUNC_IMM(u, uint, 32, 4, >>, vshrimm)
+FUNC_IMM(s, int, 16, 8, >>, vshrimm)
+FUNC_IMM(u, uint, 16, 8, >>, vshrimm)
+FUNC_IMM(s, int, 8, 16, >>, vshrimm)
+FUNC_IMM(u, uint, 8, 16, >>, vshrimm)
+
+/* MVE has only 128-bit vectors, so we can vectorize only half of the
+ functions above. */
+/* { dg-final { scan-assembler-times {vshr.s[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
+/* { dg-final { scan-assembler-times {vshr.u[0-9]+\tq[0-9]+, q[0-9]+} 3 } } */
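+
+/* Illustrative expectation, an assumption rather than part of the
+   original test: at -O3 each 128-bit immediate-shift function should
+   contain an instruction along the lines of "vshr.s32 q0, q0, #5"
+   (or .u32, .s16, ...), one per element size, hence the counts of 3
+   signed and 3 unsigned matches above.  */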