SDValue Res = DAG.getUNDEF(ContainerVT);
if (HiV) {
- // If we are doing a SLIDEDOWN+SLIDEUP, reduce the VL for the SLIDEDOWN.
- // FIXME: If we are only doing a SLIDEDOWN, don't reduce the VL as it
- // causes multiple vsetvlis in some test cases such as lowering
- // reduce.mul
- SDValue DownVL = VL;
- if (LoV)
- DownVL = DAG.getConstant(InvRotate, DL, XLenVT);
+ // Even though we could use a smaller VL for the slidedown, don't; using
+ // the same VL for both slides avoids a vsetivli toggle between them.
Res = getVSlidedown(DAG, Subtarget, DL, ContainerVT, Res, HiV,
- DAG.getConstant(Rotation, DL, XLenVT), TrueMask,
- DownVL);
+ DAG.getConstant(Rotation, DL, XLenVT), TrueMask, VL);
}
if (LoV)
Res = getVSlideup(DAG, Subtarget, DL, ContainerVT, Res, LoV,
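
For reference, the shuffles updated below all hit this path. A two-element
reverse, for instance, is a rotate by one, lowered to a vslidedown feeding a
vslideup. A minimal sketch of that input in LLVM IR (function name
illustrative, not taken from the patch):

; result[0] = a[1], result[1] = a[0]: a rotate by one of <2 x i8>.
define <2 x i8> @rotate_by_one(<2 x i8> %a) {
  %r = shufflevector <2 x i8> %a, <2 x i8> poison, <2 x i32> <i32 1, i32 0>
  ret <2 x i8> %r
}

With the change, both slides execute under the full VL of 2, so a single
vsetivli covers the pair; the updated CHECK lines below show the second
vsetivli disappearing.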
define <2 x i8> @reverse_v2i8(<2 x i8> %a) {
; CHECK-LABEL: reverse_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <2 x i16> @reverse_v2i16(<2 x i16> %a) {
; CHECK-LABEL: reverse_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <2 x i32> @reverse_v2i32(<2 x i32> %a) {
; CHECK-LABEL: reverse_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <2 x i64> @reverse_v2i64(<2 x i64> %a) {
; CHECK-LABEL: reverse_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
define <2 x half> @reverse_v2f16(<2 x half> %a) {
; CHECK-LABEL: reverse_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <2 x float> @reverse_v2f32(<2 x float> %a) {
; CHECK-LABEL: reverse_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <2 x double> @reverse_v2f64(<2 x double> %a) {
; CHECK-LABEL: reverse_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
define <8 x float> @splice_unary(<8 x float> %x) {
; CHECK-LABEL: splice_unary:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 7, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 7
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
define <8 x double> @splice_unary2(<8 x double> %x) {
; CHECK-LABEL: splice_unary2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v8, 6
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vslidedown.vi v12, v8, 6
; CHECK-NEXT: vslideup.vi v12, v8, 2
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
define <8 x float> @splice_binary(<8 x float> %x, <8 x float> %y) {
; CHECK-LABEL: splice_binary:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vslideup.vi v8, v10, 6
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
define <8 x double> @splice_binary2(<8 x double> %x, <8 x double> %y) {
; CHECK-LABEL: splice_binary2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e64, m4, ta, ma
-; CHECK-NEXT: vslidedown.vi v12, v12, 5
; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
+; CHECK-NEXT: vslidedown.vi v12, v12, 5
; CHECK-NEXT: vslideup.vi v12, v8, 3
; CHECK-NEXT: vmv.v.v v8, v12
; CHECK-NEXT: ret
define <8 x i16> @splice_unary(<8 x i16> %x) {
; CHECK-LABEL: splice_unary:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 2
; CHECK-NEXT: vslideup.vi v9, v8, 6
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
define <8 x i32> @splice_unary2(<8 x i32> %x) {
; CHECK-LABEL: splice_unary2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 5
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 5
; CHECK-NEXT: vslideup.vi v10, v8, 3
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
define <8 x i16> @splice_binary(<8 x i16> %x, <8 x i16> %y) {
; CHECK-LABEL: splice_binary:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 6, e16, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vslideup.vi v8, v9, 6
; CHECK-NEXT: ret
%s = shufflevector <8 x i16> %x, <8 x i16> %y, <8 x i32> <i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 undef, i32 9>
define <8 x i32> @splice_binary2(<8 x i32> %x, <8 x i32> %y) {
; CHECK-LABEL: splice_binary2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 3, e32, m2, ta, ma
-; CHECK-NEXT: vslidedown.vi v8, v8, 5
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; CHECK-NEXT: vslidedown.vi v8, v8, 5
; CHECK-NEXT: vslideup.vi v8, v10, 3
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> %y, <8 x i32> <i32 undef, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12>
define <2 x i8> @v2i8(<2 x i8> %a) {
; CHECK-LABEL: v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <4 x i8> @v2i8_2(<2 x i8> %a, <2 x i8> %b) {
; CHECK-LABEL: v2i8_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x i16> @v2i16(<2 x i16> %a) {
; CHECK-LABEL: v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <4 x i16> @v2i16_2(<2 x i16> %a, <2 x i16> %b) {
; CHECK-LABEL: v2i16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x i32> @v2i32(<2 x i32> %a) {
; CHECK-LABEL: v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <4 x i32> @v2i32_2(<2 x i32> %a, <2 x i32> %b) {
; CHECK-LABEL: v2i32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x i64> @v2i64(<2 x i64> %a) {
; CHECK-LABEL: v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
define <4 x i64> @v2i64_2(<2 x i64> %a, <2 x i64> %b) {
; CHECK-LABEL: v2i64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x half> @v2f16(<2 x half> %a) {
; CHECK-LABEL: v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <4 x half> @v2f16_2(<2 x half> %a, <2 x half> %b) {
; CHECK-LABEL: v2f16_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x float> @v2f32(<2 x float> %a) {
; CHECK-LABEL: v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <4 x float> @v2f32_2(<2 x float> %a, <2 x float> %b) {
; CHECK-LABEL: v2f32_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1
define <2 x double> @v2f64(<2 x double> %a) {
; CHECK-LABEL: v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v9, v8, 1
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv.v.v v8, v9
; CHECK-NEXT: ret
define <4 x double> @v2f64_2(<2 x double> %a, <2 x double> %b) {
; CHECK-LABEL: v2f64_2:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
-; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
+; CHECK-NEXT: vslidedown.vi v10, v8, 1
; CHECK-NEXT: vslideup.vi v10, v8, 1
; CHECK-NEXT: vslidedown.vi v8, v9, 1
; CHECK-NEXT: vslideup.vi v8, v9, 1