From 0f64d4f87799554e14d99b37ffbbca1323106e69 Mon Sep 17 00:00:00 2001
From: Philip Reames
Date: Thu, 25 May 2023 07:51:14 -0700
Subject: [PATCH] [RISCV] Add test coverage for shuffle/insert idioms which
 can become v(f)slide1ups

---
 .../RISCV/rvv/fixed-vector-shuffle-vslide1up.ll | 298 +++++++++++++++++++++
 1 file changed, 298 insertions(+)
 create mode 100644 llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll

diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll
new file mode 100644
index 0000000..5015071
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-shuffle-vslide1up.ll
@@ -0,0 +1,298 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2
+; RUN: llc -mtriple=riscv32 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32
+; RUN: llc -mtriple=riscv64 -mattr=+m,+v,+f,+d,+zfh,+experimental-zvfh -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+
+define <2 x i8> @vslide1up_2xi8(<2 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_2xi8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT:    vmv.v.x v10, a0
+; CHECK-NEXT:    vsetivli zero, 1, e8, mf8, ta, ma
+; CHECK-NEXT:    vwaddu.vv v9, v10, v8
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v9, a0, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <2 x i8> poison, i8 %b, i64 0
+  %v1 = shufflevector <2 x i8> %v, <2 x i8> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x i8> %v1
+}
+
+define <4 x i8> @vslide1up_4xi8(<4 x i8> %v, i8 %b) {
+; RV32-LABEL: vslide1up_4xi8:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v9, a0
+; RV32-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV32-NEXT:    vslideup.vi v9, v8, 3
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_4xi8:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vslideup.vi v9, v8, 3
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+  %vb = insertelement <4 x i8> poison, i8 %b, i64 0
+  %v1 = shufflevector <4 x i8> %v, <4 x i8> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x i8> %v1
+}
+
+define <2 x i16> @vslide1up_2xi16(<2 x i16> %v, i16 %b) {
+; RV32-LABEL: vslide1up_2xi16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.s.x v10, a0
+; RV32-NEXT:    vsetvli zero, zero, e16, mf4, ta, ma
+; RV32-NEXT:    vwaddu.vv v9, v10, v8
+; RV32-NEXT:    li a0, -1
+; RV32-NEXT:    vwmaccu.vx v9, a0, v8
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_2xi16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; RV64-NEXT:    vmv.v.x v10, a0
+; RV64-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; RV64-NEXT:    vwaddu.vv v9, v10, v8
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    vwmaccu.vx v9, a0, v8
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+  %vb = insertelement <2 x i16> poison, i16 %b, i64 0
+  %v1 = shufflevector <2 x i16> %v, <2 x i16> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x i16> %v1
+}
+
+define <4 x i16> @vslide1up_4xi16(<4 x i16> %v, i16 %b) {
+; RV32-LABEL: vslide1up_4xi16:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV32-NEXT:    vmv.v.x v9, a0
+; RV32-NEXT:    vslideup.vi v9, v8, 3
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_4xi16:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v9, a0
+; RV64-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; RV64-NEXT:    vslideup.vi v9, v8, 3
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+  %vb = insertelement <4 x i16> poison, i16 %b, i64 0
+  %v1 = shufflevector <4 x i16> %v, <4 x i16> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x i16> %v1
+}
+
+define <2 x i32> @vslide1up_2xi32(<2 x i32> %v, i32 %b) {
+; RV32-LABEL: vslide1up_2xi32:
+; RV32:       # %bb.0:
+; RV32-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; RV32-NEXT:    vmv.v.x v10, a0
+; RV32-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; RV32-NEXT:    vwaddu.vv v9, v10, v8
+; RV32-NEXT:    li a0, -1
+; RV32-NEXT:    vwmaccu.vx v9, a0, v8
+; RV32-NEXT:    vmv1r.v v8, v9
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_2xi32:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 1, e64, m1, ta, ma
+; RV64-NEXT:    vmv.s.x v10, a0
+; RV64-NEXT:    vsetvli zero, zero, e32, mf2, ta, ma
+; RV64-NEXT:    vwaddu.vv v9, v10, v8
+; RV64-NEXT:    li a0, -1
+; RV64-NEXT:    vwmaccu.vx v9, a0, v8
+; RV64-NEXT:    vmv1r.v v8, v9
+; RV64-NEXT:    ret
+  %vb = insertelement <2 x i32> poison, i32 %b, i64 0
+  %v1 = shufflevector <2 x i32> %v, <2 x i32> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x i32> %v1
+}
+
+define <4 x i32> @vslide1up_4xi32(<4 x i32> %v, i32 %b) {
+; CHECK-LABEL: vslide1up_4xi32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vmv.v.x v9, a0
+; CHECK-NEXT:    vslideup.vi v9, v8, 3
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <4 x i32> poison, i32 %b, i64 0
+  %v1 = shufflevector <4 x i32> %v, <4 x i32> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x i32> %v1
+}
+
+define <2 x i64> @vslide1up_2xi64(<2 x i64> %v, i64 %b) {
+; RV32-LABEL: vslide1up_2xi64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV32-NEXT:    vlse64.v v9, (a0), zero
+; RV32-NEXT:    vslideup.vi v9, v8, 1
+; RV32-NEXT:    vmv.v.v v8, v9
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_2xi64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; RV64-NEXT:    vmv.v.x v9, a0
+; RV64-NEXT:    vslideup.vi v9, v8, 1
+; RV64-NEXT:    vmv.v.v v8, v9
+; RV64-NEXT:    ret
+  %vb = insertelement <2 x i64> poison, i64 %b, i64 0
+  %v1 = shufflevector <2 x i64> %v, <2 x i64> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x i64> %v1
+}
+
+define <4 x i64> @vslide1up_4xi64(<4 x i64> %v, i64 %b) {
+; RV32-LABEL: vslide1up_4xi64:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    sw a1, 12(sp)
+; RV32-NEXT:    sw a0, 8(sp)
+; RV32-NEXT:    addi a0, sp, 8
+; RV32-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV32-NEXT:    vlse64.v v10, (a0), zero
+; RV32-NEXT:    vslideup.vi v10, v8, 3
+; RV32-NEXT:    vmv.v.v v8, v10
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vslide1up_4xi64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; RV64-NEXT:    vmv.v.x v10, a0
+; RV64-NEXT:    vslideup.vi v10, v8, 3
+; RV64-NEXT:    vmv.v.v v8, v10
+; RV64-NEXT:    ret
+  %vb = insertelement <4 x i64> poison, i64 %b, i64 0
+  %v1 = shufflevector <4 x i64> %v, <4 x i64> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x i64> %v1
+}
+
+define <2 x half> @vslide1up_2xf16(<2 x half> %v, half %b) {
+; CHECK-LABEL: vslide1up_2xf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT:    vfmv.v.f v10, fa0
+; CHECK-NEXT:    vsetivli zero, 1, e16, mf4, ta, ma
+; CHECK-NEXT:    vwaddu.vv v9, v10, v8
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v9, a0, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <2 x half> poison, half %b, i64 0
+  %v1 = shufflevector <2 x half> %v, <2 x half> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x half> %v1
+}
+
+define <4 x half> @vslide1up_4xf16(<4 x half> %v, half %b) {
+; CHECK-LABEL: vslide1up_4xf16:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e16, mf2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v9, fa0
+; CHECK-NEXT:    vslideup.vi v9, v8, 3
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <4 x half> poison, half %b, i64 0
+  %v1 = shufflevector <4 x half> %v, <4 x half> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x half> %v1
+}
+
+define <2 x float> @vslide1up_2xf32(<2 x float> %v, float %b) {
+; CHECK-LABEL: vslide1up_2xf32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v10, fa0
+; CHECK-NEXT:    vsetivli zero, 1, e32, mf2, ta, ma
+; CHECK-NEXT:    vwaddu.vv v9, v10, v8
+; CHECK-NEXT:    li a0, -1
+; CHECK-NEXT:    vwmaccu.vx v9, a0, v8
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <2 x float> poison, float %b, i64 0
+  %v1 = shufflevector <2 x float> %v, <2 x float> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x float> %v1
+}
+
+define <4 x float> @vslide1up_4xf32(<4 x float> %v, float %b) {
+; CHECK-LABEL: vslide1up_4xf32:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
+; CHECK-NEXT:    vfmv.v.f v9, fa0
+; CHECK-NEXT:    vslideup.vi v9, v8, 3
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <4 x float> poison, float %b, i64 0
+  %v1 = shufflevector <4 x float> %v, <4 x float> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x float> %v1
+}
+
+define <2 x double> @vslide1up_2xf64(<2 x double> %v, double %b) {
+; CHECK-LABEL: vslide1up_2xf64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vfmv.v.f v9, fa0
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vmv.v.v v8, v9
+; CHECK-NEXT:    ret
+  %vb = insertelement <2 x double> poison, double %b, i64 0
+  %v1 = shufflevector <2 x double> %v, <2 x double> %vb, <2 x i32> <i32 2, i32 0>
+  ret <2 x double> %v1
+}
+
+define <4 x double> @vslide1up_4xf64(<4 x double> %v, double %b) {
+; CHECK-LABEL: vslide1up_4xf64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e64, m2, ta, ma
+; CHECK-NEXT:    vfmv.v.f v10, fa0
+; CHECK-NEXT:    vslideup.vi v10, v8, 3
+; CHECK-NEXT:    vmv.v.v v8, v10
+; CHECK-NEXT:    ret
+  %vb = insertelement <4 x double> poison, double %b, i64 0
+  %v1 = shufflevector <4 x double> %v, <4 x double> %vb, <4 x i32> <i32 4, i32 0, i32 1, i32 2>
+  ret <4 x double> %v1
+}
+
+define <2 x double> @vslide1up_v2f64_inverted(<2 x double> %v, double %b) {
+; CHECK-LABEL: vslide1up_v2f64_inverted:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT:    vrgather.vi v9, v8, 0
+; CHECK-NEXT:    vsetvli zero, zero, e64, m1, tu, ma
+; CHECK-NEXT:    vfmv.s.f v9, fa0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v1 = shufflevector <2 x double> %v, <2 x double> poison, <2 x i32> <i32 undef, i32 0>
+  %v2 = insertelement <2 x double> %v1, double %b, i64 0
+  ret <2 x double> %v2
+}
+
+define <4 x i8> @vslide1up_4xi8_inverted(<4 x i8> %v, i8 %b) {
+; CHECK-LABEL: vslide1up_4xi8_inverted:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetivli zero, 4, e8, mf4, ta, ma
+; CHECK-NEXT:    vslideup.vi v9, v8, 1
+; CHECK-NEXT:    vsetvli zero, zero, e8, mf4, tu, ma
+; CHECK-NEXT:    vmv.s.x v9, a0
+; CHECK-NEXT:    vmv1r.v v8, v9
+; CHECK-NEXT:    ret
+  %v1 = shufflevector <4 x i8> %v, <4 x i8> poison, <4 x i32> <i32 undef, i32 0, i32 1, i32 2>
+  %v2 = insertelement <4 x i8> %v1, i8 %b, i64 0
+  ret <4 x i8> %v2
+}
--
2.7.4
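
For context: each positive test above builds the value {b, v[0], ..., v[n-2]}, i.e. the input vector slid up one lane with the scalar inserted at lane 0, which is exactly what a single vslide1up.vx (or vfslide1up.vf for the FP cases) computes. A hand-written sketch of the hoped-for lowering for vslide1up_4xi32, once a matcher for this idiom lands (illustrative only, not autogenerated output from this patch):

    # %v in v8, %b in a0
    vsetivli zero, 4, e32, m1, ta, ma
    vslide1up.vx v9, v8, a0    # v9 = {a0, v8[0], v8[1], v8[2]}
    vmv.v.v v8, v9
    ret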