From: Christophe Lyon
Date: Thu, 11 Mar 2021 11:08:49 +0000 (+0000)
Subject: arm: Auto-vectorization for MVE: vld4/vst4
X-Git-Tag: upstream/12.2.0~7684
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4eaf65ed6a6fbeefae28bd850329fb226e76f861;p=platform%2Fupstream%2Fgcc.git

arm: Auto-vectorization for MVE: vld4/vst4

This patch enables MVE vld4/vst4 instructions for auto-vectorization.
We move the existing expanders from neon.md and enable them for MVE,
calling the respective emitter.

2021-03-12  Christophe Lyon

	gcc/
	* config/arm/neon.md (vec_load_lanesxi<mode>)
	(vec_store_lanesxi<mode>): Move ...
	* config/arm/vec-common.md: here.

	gcc/testsuite/
	* gcc.target/arm/simd/mve-vld4.c: New test, derived from
	slp-perm-3.c.
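For illustration, a stride-4 loop of the shape below is what the moved
expanders let the vectorizer handle on MVE. This is a minimal sketch,
not part of the patch; the function name is illustrative, and the flags
are assumed to be what the test's arm_v8_1m_mve_fp options expand to
(something like -O3 -march=armv8.1-m.main+mve.fp -mfloat-abi=hard):

    #include <stdint.h>

    /* Each iteration reads four interleaved lanes and writes four
       interleaved lanes, so the vectorizer can use the
       vld4{0,1,2,3}/vst4{0,1,2,3} instructions, subject to its
       cost model.  */
    void
    sum_stride4 (int32_t *__restrict__ in, int32_t *__restrict__ out,
                 int n)
    {
      for (int i = 0; i < n; i++)
        {
          int32_t a = in[4 * i + 0];
          int32_t b = in[4 * i + 1];
          int32_t c = in[4 * i + 2];
          int32_t d = in[4 * i + 3];

          out[4 * i + 0] = a + b;
          out[4 * i + 1] = b + c;
          out[4 * i + 2] = c + d;
          out[4 * i + 3] = d + a;
        }
    }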
---

diff --git a/gcc/config/arm/neon.md b/gcc/config/arm/neon.md
index 25d4252..977adef 100644
--- a/gcc/config/arm/neon.md
+++ b/gcc/config/arm/neon.md
@@ -5620,16 +5620,6 @@ if (BYTES_BIG_ENDIAN)
                     (const_string "neon_load4_4reg")))]
 )
 
-(define_expand "vec_load_lanesxi<mode>"
-  [(match_operand:XI 0 "s_register_operand")
-   (match_operand:XI 1 "neon_struct_operand")
-   (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-  "TARGET_NEON"
-{
-  emit_insn (gen_neon_vld4<mode> (operands[0], operands[1]));
-  DONE;
-})
-
 (define_expand "neon_vld4<mode>"
   [(match_operand:XI 0 "s_register_operand")
    (match_operand:XI 1 "neon_struct_operand")
@@ -5821,16 +5811,6 @@ if (BYTES_BIG_ENDIAN)
                     (const_string "neon_store4_4reg")))]
 )
 
-(define_expand "vec_store_lanesxi<mode>"
-  [(match_operand:XI 0 "neon_struct_operand")
-   (match_operand:XI 1 "s_register_operand")
-   (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
-  "TARGET_NEON"
-{
-  emit_insn (gen_neon_vst4<mode> (operands[0], operands[1]));
-  DONE;
-})
-
 (define_expand "neon_vst4<mode>"
   [(match_operand:XI 0 "neon_struct_operand")
    (match_operand:XI 1 "s_register_operand")
diff --git a/gcc/config/arm/vec-common.md b/gcc/config/arm/vec-common.md
index 0b79e68..e8b2901 100644
--- a/gcc/config/arm/vec-common.md
+++ b/gcc/config/arm/vec-common.md
@@ -513,3 +513,29 @@
     emit_insn (gen_mve_vst2q<mode> (operands[0], operands[1]));
   DONE;
 })
+
+(define_expand "vec_load_lanesxi<mode>"
+  [(match_operand:XI 0 "s_register_operand")
+   (match_operand:XI 1 "neon_struct_operand")
+   (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_NEON || TARGET_HAVE_MVE"
+{
+  if (TARGET_NEON)
+    emit_insn (gen_neon_vld4<mode> (operands[0], operands[1]));
+  else
+    emit_insn (gen_mve_vld4q<mode> (operands[0], operands[1]));
+  DONE;
+})
+
+(define_expand "vec_store_lanesxi<mode>"
+  [(match_operand:XI 0 "neon_struct_operand")
+   (match_operand:XI 1 "s_register_operand")
+   (unspec:VQ2 [(const_int 0)] UNSPEC_VSTRUCTDUMMY)]
+  "TARGET_NEON || TARGET_HAVE_MVE"
+{
+  if (TARGET_NEON)
+    emit_insn (gen_neon_vst4<mode> (operands[0], operands[1]));
+  else
+    emit_insn (gen_mve_vst4q<mode> (operands[0], operands[1]));
+  DONE;
+})
diff --git a/gcc/testsuite/gcc.target/arm/simd/mve-vld4.c b/gcc/testsuite/gcc.target/arm/simd/mve-vld4.c
new file mode 100644
index 0000000..ce3e755
--- /dev/null
+++ b/gcc/testsuite/gcc.target/arm/simd/mve-vld4.c
@@ -0,0 +1,140 @@
+/* { dg-do assemble } */
+/* { dg-require-effective-target arm_v8_1m_mve_fp_ok } */
+/* { dg-add-options arm_v8_1m_mve_fp } */
+/* { dg-additional-options "-O3" } */
+
+#include <stdint.h>
+
+#define M00 100
+#define M10 216
+#define M20 23
+#define M30 237
+#define M01 1322
+#define M11 13
+#define M21 27271
+#define M31 2280
+#define M02 74
+#define M12 191
+#define M22 500
+#define M32 111
+#define M03 134
+#define M13 117
+#define M23 11
+#define M33 771
+
+#define N 128
+
+/* Integer tests.  */
+#define FUNC(SIGN, TYPE, BITS) \
+  void foo_##SIGN##BITS##x (TYPE##BITS##_t *__restrict__ pInput, \
+                            TYPE##BITS##_t *__restrict__ pOutput) \
+  { \
+    unsigned int i; \
+    TYPE##BITS##_t a, b, c, d; \
+ \
+    for (i = 0; i < N / BITS; i++) \
+      { \
+        a = *pInput++; \
+        b = *pInput++; \
+        c = *pInput++; \
+        d = *pInput++; \
+ \
+        *pOutput++ = M00 * a + M01 * b + M02 * c + M03 * d; \
+        *pOutput++ = M10 * a + M11 * b + M12 * c + M13 * d; \
+        *pOutput++ = M20 * a + M21 * b + M22 * c + M23 * d; \
+        *pOutput++ = M30 * a + M31 * b + M32 * c + M33 * d; \
+      } \
+  }
+
+FUNC(s, int, 8)
+FUNC(u, uint, 8)
+FUNC(s, int, 16)
+FUNC(u, uint, 16)
+FUNC(s, int, 32)
+FUNC(u, uint, 32)
+
+/* float test, keep the macro because it's similar to the above, but does not
+   need the ##BITS##_t.  */
+#define FUNC_FLOAT(SIGN, TYPE, BITS) \
+  void foo_##SIGN##BITS##x (TYPE *__restrict__ pInput, \
+                            TYPE *__restrict__ pOutput) \
+  { \
+    unsigned int i; \
+    TYPE a, b, c, d; \
+ \
+    for (i = 0; i < N / BITS; i++) \
+      { \
+        a = *pInput++; \
+        b = *pInput++; \
+        c = *pInput++; \
+        d = *pInput++; \
+ \
+        *pOutput++ = M00 * a + M01 * b + M02 * c + M03 * d; \
+        *pOutput++ = M10 * a + M11 * b + M12 * c + M13 * d; \
+        *pOutput++ = M20 * a + M21 * b + M22 * c + M23 * d; \
+        *pOutput++ = M30 * a + M31 * b + M32 * c + M33 * d; \
+      } \
+  }
+
+FUNC_FLOAT(f, float, 32)
+
+/* __fp16 test, needs explicit casts to avoid conversions to floating-point and
+   failure to vectorize.  */
+__fp16 M00_fp16 = 100.0f16;
+__fp16 M10_fp16 = 216.0f16;
+__fp16 M20_fp16 = 23.0f16;
+__fp16 M30_fp16 = 237.0f16;
+__fp16 M01_fp16 = 1322.0f16;
+__fp16 M11_fp16 = 13.0f16;
+__fp16 M21_fp16 = 27271.0f16;
+__fp16 M31_fp16 = 2280.0f16;
+__fp16 M02_fp16 = 74.0f16;
+__fp16 M12_fp16 = 191.0f16;
+__fp16 M22_fp16 = 500.0f16;
+__fp16 M32_fp16 = 111.0f16;
+__fp16 M03_fp16 = 134.0f16;
+__fp16 M13_fp16 = 117.0f16;
+__fp16 M23_fp16 = 11.0f16;
+__fp16 M33_fp16 = 771.0f16;
+
+#define FUNC_FLOAT_FP16(SIGN, TYPE, BITS) \
+  void foo_##SIGN##BITS##x (TYPE *__restrict__ pInput, \
+                            TYPE *__restrict__ pOutput) \
+  { \
+    unsigned int i; \
+    TYPE a, b, c, d; \
+ \
+    for (i = 0; i < N / BITS; i++) \
+      { \
+        a = *pInput++; \
+        b = *pInput++; \
+        c = *pInput++; \
+        d = *pInput++; \
+ \
+        TYPE ab, cd; \
+        ab = (__fp16)(M00_fp16 * a) + (__fp16)(M01_fp16 * b); \
+        cd = (__fp16)(M02_fp16 * c) + (__fp16)(M03_fp16 * d); \
+        *pOutput++ = ab + cd; \
+        ab = (__fp16)(M10_fp16 * a) + (__fp16)(M11_fp16 * b); \
+        cd = (__fp16)(M12_fp16 * c) + (__fp16)(M13_fp16 * d); \
+        *pOutput++ = ab + cd; \
+        ab = (__fp16)(M20_fp16 * a) + (__fp16)(M21_fp16 * b); \
+        cd = (__fp16)(M22_fp16 * c) + (__fp16)(M23_fp16 * d); \
+        *pOutput++ = ab + cd; \
+        ab = (__fp16)(M30_fp16 * a) + (__fp16)(M31_fp16 * b); \
+        cd = (__fp16)(M32_fp16 * c) + (__fp16)(M33_fp16 * d); \
+        *pOutput++ = ab + cd; \
+      } \
+  }
+
+FUNC_FLOAT_FP16(f, __fp16, 16)
+
+/* vld4X.8 is used for signed and unsigned chars: 2 * 4.  */
+/* vld4X.16 is used for signed and unsigned shorts and __fp16: 3 * 4.  */
+/* vld4X.32 is used for signed and unsigned ints and float: 3 * 4.  */
+/* { dg-final { scan-assembler-times {vld4[0123].8\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 8 } } */
+/* { dg-final { scan-assembler-times {vld4[0123].16\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 12 } } */
+/* { dg-final { scan-assembler-times {vld4[0123].32\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 12 } } */
+/* { dg-final { scan-assembler-times {vst4[0123].8\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 8 } } */
+/* { dg-final { scan-assembler-times {vst4[0123].16\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 12 } } */
+/* { dg-final { scan-assembler-times {vst4[0123].32\t.q[0-9]+, q[0-9]+, q[0-9]+, q[0-9]+., } 12 } } */
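
For reference, the interleaving that these expanders emit through
gen_mve_vld4q/gen_mve_vst4q is also exposed directly by the ACLE MVE
intrinsics vld4q/vst4q. A hand-written sketch, not part of the patch
(the function name is illustrative, and an MVE-enabled toolchain is
assumed):

    #include <arm_mve.h>

    /* n must be a multiple of 16; in/out each hold 4 * n bytes.
       vld4q_s8 loads 64 bytes and de-interleaves them into
       v.val[0..3] (the four vld4{0,1,2,3}.8 beats); vst4q_s8
       re-interleaves and stores them (vst4{0,1,2,3}.8).  */
    void
    negate_lane0 (const int8_t *in, int8_t *out, int n)
    {
      for (int i = 0; i < n; i += 16)
        {
          int8x16x4_t v = vld4q_s8 (in + 4 * i);
          v.val[0] = vnegq_s8 (v.val[0]);  /* Transform one lane.  */
          vst4q_s8 (out + 4 * i, v);
        }
    }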