From 186a7f81e813a59b68fadfe7ea90c348bb760c56 Mon Sep 17 00:00:00 2001 From: David Green Date: Sun, 9 Aug 2020 11:09:49 +0100 Subject: [PATCH] [ARM] Add VADDV and VMLAV patterns for v16i16 This adds patterns for v16i16's vecreduce, using all the existing code to go via an i32 VADDV/VMLAV and truncating the result. Differential Revision: https://reviews.llvm.org/D85452 --- llvm/lib/Target/ARM/ARMISelLowering.cpp | 24 ++ llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll | 148 +------ llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll | 312 +-------------- llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll | 252 +----------- llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll | 276 +------------ llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll | 460 +--------------------- 6 files changed, 67 insertions(+), 1405 deletions(-) diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp index dd53b10..42c0db7 100644 --- a/llvm/lib/Target/ARM/ARMISelLowering.cpp +++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp @@ -14808,6 +14808,12 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, return Create64bitNode(ARMISD::VADDLVs, {A}); if (SDValue A = IsVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32})) return Create64bitNode(ARMISD::VADDLVu, {A}); + if (SDValue A = IsVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8})) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VADDVs, dl, MVT::i32, A)); + if (SDValue A = IsVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8})) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VADDVu, dl, MVT::i32, A)); SDValue Mask; if (SDValue A = IsPredVADDV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, Mask)) @@ -14818,6 +14824,12 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, return Create64bitNode(ARMISD::VADDLVps, {A, Mask}); if (SDValue A = IsPredVADDV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v4i32}, Mask)) return Create64bitNode(ARMISD::VADDLVpu, {A, Mask}); + if (SDValue A = IsPredVADDV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, Mask)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VADDVps, dl, MVT::i32, A, Mask)); + if (SDValue A = IsPredVADDV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, Mask)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VADDVpu, dl, MVT::i32, A, Mask)); SDValue A, B; if (IsVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B)) @@ -14828,6 +14840,12 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, return Create64bitNode(ARMISD::VMLALVs, {A, B}); if (IsVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B)) return Create64bitNode(ARMISD::VMLALVu, {A, B}); + if (IsVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VMLAVs, dl, MVT::i32, A, B)); + if (IsVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VMLAVu, dl, MVT::i32, A, B)); if (IsPredVMLAV(MVT::i32, ISD::SIGN_EXTEND, {MVT::v8i16, MVT::v16i8}, A, B, Mask)) return DAG.getNode(ARMISD::VMLAVps, dl, ResVT, A, B, Mask); @@ -14837,6 +14855,12 @@ static SDValue PerformVECREDUCE_ADDCombine(SDNode *N, SelectionDAG &DAG, return Create64bitNode(ARMISD::VMLALVps, {A, B, Mask}); if (IsPredVMLAV(MVT::i64, ISD::ZERO_EXTEND, {MVT::v8i16, MVT::v4i32}, A, B, Mask)) return Create64bitNode(ARMISD::VMLALVpu, {A, B, Mask}); + if (IsPredVMLAV(MVT::i16, ISD::SIGN_EXTEND, {MVT::v16i8}, A, B, 
Mask)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VMLAVps, dl, MVT::i32, A, B, Mask)); + if (IsPredVMLAV(MVT::i16, ISD::ZERO_EXTEND, {MVT::v16i8}, A, B, Mask)) + return DAG.getNode(ISD::TRUNCATE, dl, ResVT, + DAG.getNode(ARMISD::VMLAVpu, dl, MVT::i32, A, B, Mask)); return SDValue(); } diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll index 2f483b3..35eecab 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add.ll @@ -333,42 +333,7 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x) { ; CHECK-LABEL: add_v16i8_v16i16_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmovlb.u8 q1, q1 -; CHECK-NEXT: vmovlb.u8 q0, q2 -; CHECK-NEXT: vadd.i16 q0, q0, q1 -; CHECK-NEXT: vaddv.u16 r0, q0 +; CHECK-NEXT: vaddv.u8 r0, q0 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -380,42 +345,7 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x) { ; CHECK-LABEL: add_v16i8_v16i16_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmovlb.s8 q1, q1 -; CHECK-NEXT: vmovlb.s8 q0, q2 -; CHECK-NEXT: vadd.i16 q0, q0, q1 -; CHECK-NEXT: vaddv.u16 r0, q0 +; CHECK-NEXT: vaddv.s8 r0, q0 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -1120,42 +1050,7 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, i16 %a) 
{ ; CHECK-LABEL: add_v16i8_v16i16_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmovlb.u8 q1, q1 -; CHECK-NEXT: vmovlb.u8 q0, q2 -; CHECK-NEXT: vadd.i16 q0, q0, q1 -; CHECK-NEXT: vaddva.u16 r0, q0 +; CHECK-NEXT: vaddva.u8 r0, q0 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -1168,42 +1063,7 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmovlb.s8 q1, q1 -; CHECK-NEXT: vmovlb.s8 q0, q2 -; CHECK-NEXT: vadd.i16 q0, q0, q1 -; CHECK-NEXT: vaddva.u16 r0, q0 +; CHECK-NEXT: vaddva.s8 r0, q0 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll index eb47d1d..0f3aacf 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-addpred.ll @@ -607,82 +607,8 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x, <16 x i8> %b) { ; CHECK-LABEL: add_v16i8_v16i16_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vcmp.i8 eq, q1, zr -; CHECK-NEXT: vmov.i8 q1, #0x0 -; CHECK-NEXT: vmov.i8 q2, #0xff -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpsel q1, q2, q1 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; 
CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vcmp.i16 ne, q2, zr -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmovlb.u8 q2, q2 -; CHECK-NEXT: vpsel q2, q2, q3 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmovlb.u8 q0, q1 -; CHECK-NEXT: vpt.i16 ne, q3, zr -; CHECK-NEXT: vaddt.i16 q2, q2, q0 -; CHECK-NEXT: vaddv.u16 r0, q2 +; CHECK-NEXT: vpt.i8 eq, q1, zr +; CHECK-NEXT: vaddvt.u8 r0, q0 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -696,82 +622,8 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x, <16 x i8> %b) { ; CHECK-LABEL: add_v16i8_v16i16_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vcmp.i8 eq, q1, zr -; CHECK-NEXT: vmov.i8 q1, #0x0 -; CHECK-NEXT: vmov.i8 q2, #0xff -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpsel q1, q2, q1 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vcmp.i16 ne, q2, zr -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; 
CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmovlb.s8 q2, q2 -; CHECK-NEXT: vpsel q2, q2, q3 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmovlb.s8 q0, q1 -; CHECK-NEXT: vpt.i16 ne, q3, zr -; CHECK-NEXT: vaddt.i16 q2, q2, q0 -; CHECK-NEXT: vaddv.u16 r0, q2 +; CHECK-NEXT: vpt.i8 eq, q1, zr +; CHECK-NEXT: vaddvt.s8 r0, q0 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -2170,82 +2022,8 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, <16 x i8> %b, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vcmp.i8 eq, q1, zr -; CHECK-NEXT: vmov.i8 q1, #0x0 -; CHECK-NEXT: vmov.i8 q2, #0xff -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpsel q1, q2, q1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vcmp.i16 ne, q2, zr -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmovlb.u8 q2, q2 -; CHECK-NEXT: vpsel q2, q2, q3 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 
r1, q1[14] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmovlb.u8 q0, q1 -; CHECK-NEXT: vpt.i16 ne, q3, zr -; CHECK-NEXT: vaddt.i16 q2, q2, q0 -; CHECK-NEXT: vaddva.u16 r0, q2 +; CHECK-NEXT: vpt.i8 eq, q1, zr +; CHECK-NEXT: vaddvat.u8 r0, q0 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -2260,82 +2038,8 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, <16 x i8> %b, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vcmp.i8 eq, q1, zr -; CHECK-NEXT: vmov.i8 q1, #0x0 -; CHECK-NEXT: vmov.i8 q2, #0xff -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpsel q1, q2, q1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vcmp.i16 ne, q2, zr -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmovlb.s8 q2, q2 -; CHECK-NEXT: vpsel q2, q2, q3 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmovlb.s8 q0, q1 -; CHECK-NEXT: vpt.i16 ne, q3, zr -; 
CHECK-NEXT: vaddt.i16 q2, q2, q0 -; CHECK-NEXT: vaddva.u16 r0, q2 +; CHECK-NEXT: vpt.i8 eq, q1, zr +; CHECK-NEXT: vaddvat.s8 r0, q0 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll index 094f81e..539f760 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-loops.ll @@ -2127,109 +2127,21 @@ define signext i16 @add16i16(i8* noalias nocapture readonly %x, i32 %n) { ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r7, lr} ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13} -; CHECK-NEXT: cmp r1, #0 -; CHECK-NEXT: beq.w .LBB25_3 +; CHECK-NEXT: cbz r1, .LBB25_4 ; CHECK-NEXT: @ %bb.1: @ %vector.ph -; CHECK-NEXT: add.w r2, r1, #15 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: bic r2, r2, #15 -; CHECK-NEXT: vmov.i8 q0, #0x0 -; CHECK-NEXT: subs r2, #16 -; CHECK-NEXT: vmov.i8 q1, #0xff -; CHECK-NEXT: vmov.i32 q2, #0x0 -; CHECK-NEXT: add.w lr, r3, r2, lsr #4 ; CHECK-NEXT: movs r2, #0 -; CHECK-NEXT: dls lr, lr +; CHECK-NEXT: dlstp.8 lr, r1 ; CHECK-NEXT: .LBB25_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmov q4, q0 -; CHECK-NEXT: vctp.8 r1 -; CHECK-NEXT: vpstt -; CHECK-NEXT: vmovt q4, q1 -; CHECK-NEXT: vldrbt.u8 q3, [r0], #16 -; CHECK-NEXT: vmov.u8 r3, q4[0] -; CHECK-NEXT: vmov.16 q5[0], r3 -; CHECK-NEXT: vmov.u8 r3, q4[1] -; CHECK-NEXT: vmov.16 q5[1], r3 -; CHECK-NEXT: vmov.u8 r3, q4[2] -; CHECK-NEXT: vmov.16 q5[2], r3 -; CHECK-NEXT: vmov.u8 r3, q4[3] -; CHECK-NEXT: vmov.16 q5[3], r3 -; CHECK-NEXT: vmov.u8 r3, q4[4] -; CHECK-NEXT: vmov.16 q5[4], r3 -; CHECK-NEXT: vmov.u8 r3, q4[5] -; CHECK-NEXT: vmov.16 q5[5], r3 -; CHECK-NEXT: vmov.u8 r3, q4[6] -; CHECK-NEXT: vmov.16 q5[6], r3 -; CHECK-NEXT: vmov.u8 r3, q4[7] -; CHECK-NEXT: vmov.16 q5[7], r3 -; CHECK-NEXT: vmov.u8 r3, q3[0] -; CHECK-NEXT: vcmp.i16 ne, q5, zr -; CHECK-NEXT: vmov.16 q5[0], r3 -; CHECK-NEXT: vmov.u8 r3, q3[1] -; CHECK-NEXT: subs r1, #16 -; CHECK-NEXT: vmov.16 q5[1], r3 -; CHECK-NEXT: vmov.u8 r3, q3[2] -; CHECK-NEXT: vmov.16 q5[2], r3 -; CHECK-NEXT: vmov.u8 r3, q3[3] -; CHECK-NEXT: vmov.16 q5[3], r3 -; CHECK-NEXT: vmov.u8 r3, q3[4] -; CHECK-NEXT: vmov.16 q5[4], r3 -; CHECK-NEXT: vmov.u8 r3, q3[5] -; CHECK-NEXT: vmov.16 q5[5], r3 -; CHECK-NEXT: vmov.u8 r3, q3[6] -; CHECK-NEXT: vmov.16 q5[6], r3 -; CHECK-NEXT: vmov.u8 r3, q3[7] -; CHECK-NEXT: vmov.16 q5[7], r3 -; CHECK-NEXT: vmov.u8 r3, q4[8] -; CHECK-NEXT: vmovlb.u8 q6, q5 -; CHECK-NEXT: vmov q5, q2 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmovt q5, q6 -; CHECK-NEXT: vmov.16 q6[0], r3 -; CHECK-NEXT: vmov.u8 r3, q4[9] -; CHECK-NEXT: vmov.16 q6[1], r3 -; CHECK-NEXT: vmov.u8 r3, q4[10] -; CHECK-NEXT: vmov.16 q6[2], r3 -; CHECK-NEXT: vmov.u8 r3, q4[11] -; CHECK-NEXT: vmov.16 q6[3], r3 -; CHECK-NEXT: vmov.u8 r3, q4[12] -; CHECK-NEXT: vmov.16 q6[4], r3 -; CHECK-NEXT: vmov.u8 r3, q4[13] -; CHECK-NEXT: vmov.16 q6[5], r3 -; CHECK-NEXT: vmov.u8 r3, q4[14] -; CHECK-NEXT: vmov.16 q6[6], r3 -; CHECK-NEXT: vmov.u8 r3, q4[15] -; CHECK-NEXT: vmov.16 q6[7], r3 -; CHECK-NEXT: vmov.u8 r3, q3[8] -; CHECK-NEXT: vmov.16 q4[0], r3 -; CHECK-NEXT: vmov.u8 r3, q3[9] -; CHECK-NEXT: vmov.16 q4[1], r3 -; CHECK-NEXT: vmov.u8 r3, q3[10] -; CHECK-NEXT: vmov.16 q4[2], r3 -; CHECK-NEXT: vmov.u8 r3, q3[11] -; CHECK-NEXT: vmov.16 q4[3], r3 -; CHECK-NEXT: vmov.u8 r3, q3[12] -; CHECK-NEXT: vmov.16 q4[4], r3 -; CHECK-NEXT: vmov.u8 r3, q3[13] -; CHECK-NEXT: 
vmov.16 q4[5], r3 -; CHECK-NEXT: vmov.u8 r3, q3[14] -; CHECK-NEXT: vmov.16 q4[6], r3 -; CHECK-NEXT: vmov.u8 r3, q3[15] -; CHECK-NEXT: vmov.16 q4[7], r3 -; CHECK-NEXT: vmovlb.u8 q3, q4 -; CHECK-NEXT: vpt.i16 ne, q6, zr -; CHECK-NEXT: vaddt.i16 q5, q5, q3 -; CHECK-NEXT: vaddva.u16 r2, q5 -; CHECK-NEXT: le lr, .LBB25_2 -; CHECK-NEXT: b .LBB25_4 -; CHECK-NEXT: .LBB25_3: +; CHECK-NEXT: vldrb.u8 q0, [r0], #16 +; CHECK-NEXT: vaddva.u8 r2, q0 +; CHECK-NEXT: letp lr, .LBB25_2 +; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup +; CHECK-NEXT: sxth r0, r2 +; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: .LBB25_4: ; CHECK-NEXT: movs r2, #0 -; CHECK-NEXT: .LBB25_4: @ %for.cond.cleanup ; CHECK-NEXT: sxth r0, r2 -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13} ; CHECK-NEXT: pop {r7, pc} entry: %cmp8.not = icmp eq i32 %n, 0 @@ -2266,148 +2178,22 @@ define signext i16 @mla16i16(i8* noalias nocapture readonly %x, i8* noalias noca ; CHECK: @ %bb.0: @ %entry ; CHECK-NEXT: .save {r7, lr} ; CHECK-NEXT: push {r7, lr} -; CHECK-NEXT: .vsave {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: vpush {d8, d9, d10, d11, d12, d13, d14, d15} -; CHECK-NEXT: .pad #24 -; CHECK-NEXT: sub sp, #24 -; CHECK-NEXT: cmp r2, #0 -; CHECK-NEXT: beq.w .LBB26_3 +; CHECK-NEXT: cbz r2, .LBB26_4 ; CHECK-NEXT: @ %bb.1: @ %vector.ph -; CHECK-NEXT: add.w r3, r2, #15 -; CHECK-NEXT: vmov.i8 q0, #0x0 -; CHECK-NEXT: bic r3, r3, #15 -; CHECK-NEXT: vmov.i8 q1, #0xff -; CHECK-NEXT: sub.w r12, r3, #16 -; CHECK-NEXT: movs r3, #1 -; CHECK-NEXT: vmov.i32 q2, #0x0 -; CHECK-NEXT: add.w lr, r3, r12, lsr #4 ; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: dls lr, lr -; CHECK-NEXT: vstrw.32 q1, [sp] @ 16-byte Spill +; CHECK-NEXT: dlstp.8 lr, r2 ; CHECK-NEXT: .LBB26_2: @ %vector.body ; CHECK-NEXT: @ =>This Inner Loop Header: Depth=1 -; CHECK-NEXT: vmov q3, q0 -; CHECK-NEXT: vldrw.u32 q1, [sp] @ 16-byte Reload -; CHECK-NEXT: vctp.8 r2 -; CHECK-NEXT: vpsttt -; CHECK-NEXT: vmovt q3, q1 -; CHECK-NEXT: vldrbt.u8 q4, [r1], #16 -; CHECK-NEXT: vldrbt.u8 q5, [r0], #16 -; CHECK-NEXT: vmov.u8 r3, q3[0] -; CHECK-NEXT: vmov q6, q2 -; CHECK-NEXT: vmov.16 q1[0], r3 -; CHECK-NEXT: vmov.u8 r3, q3[1] -; CHECK-NEXT: vmov.16 q1[1], r3 -; CHECK-NEXT: vmov.u8 r3, q3[2] -; CHECK-NEXT: vmov.16 q1[2], r3 -; CHECK-NEXT: vmov.u8 r3, q3[3] -; CHECK-NEXT: vmov.16 q1[3], r3 -; CHECK-NEXT: vmov.u8 r3, q3[4] -; CHECK-NEXT: vmov.16 q1[4], r3 -; CHECK-NEXT: vmov.u8 r3, q3[5] -; CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov.u8 r3, q3[6] -; CHECK-NEXT: vmov.16 q1[6], r3 -; CHECK-NEXT: vmov.u8 r3, q3[7] -; CHECK-NEXT: vmov.16 q1[7], r3 -; CHECK-NEXT: vmov.u8 r3, q5[0] -; CHECK-NEXT: vcmp.i16 ne, q1, zr -; CHECK-NEXT: vmov.16 q1[0], r3 -; CHECK-NEXT: vmov.u8 r3, q5[1] -; CHECK-NEXT: subs r2, #16 -; CHECK-NEXT: vmov.16 q1[1], r3 -; CHECK-NEXT: vmov.u8 r3, q5[2] -; CHECK-NEXT: vmov.16 q1[2], r3 -; CHECK-NEXT: vmov.u8 r3, q5[3] -; CHECK-NEXT: vmov.16 q1[3], r3 -; CHECK-NEXT: vmov.u8 r3, q5[4] -; CHECK-NEXT: vmov.16 q1[4], r3 -; CHECK-NEXT: vmov.u8 r3, q5[5] -; CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov.u8 r3, q5[6] -; CHECK-NEXT: vmov.16 q1[6], r3 -; CHECK-NEXT: vmov.u8 r3, q5[7] -; CHECK-NEXT: vmov.16 q1[7], r3 -; CHECK-NEXT: vmov.u8 r3, q4[0] -; CHECK-NEXT: vmovlb.u8 q7, q1 -; CHECK-NEXT: vmov.16 q1[0], r3 -; CHECK-NEXT: vmov.u8 r3, q4[1] -; CHECK-NEXT: vmov.16 q1[1], r3 -; CHECK-NEXT: vmov.u8 r3, q4[2] -; CHECK-NEXT: vmov.16 q1[2], r3 -; CHECK-NEXT: vmov.u8 r3, q4[3] -; CHECK-NEXT: vmov.16 q1[3], r3 -; CHECK-NEXT: vmov.u8 r3, q4[4] -; CHECK-NEXT: vmov.16 q1[4], r3 -; CHECK-NEXT: vmov.u8 r3, q4[5] -; 
CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov.u8 r3, q4[6] -; CHECK-NEXT: vmov.16 q1[6], r3 -; CHECK-NEXT: vmov.u8 r3, q4[7] -; CHECK-NEXT: vmov.16 q1[7], r3 -; CHECK-NEXT: vmov.u8 r3, q4[8] -; CHECK-NEXT: vmovlb.u8 q1, q1 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmult.i16 q6, q1, q7 -; CHECK-NEXT: vmov.16 q7[0], r3 -; CHECK-NEXT: vmov.u8 r3, q4[9] -; CHECK-NEXT: vmov.16 q7[1], r3 -; CHECK-NEXT: vmov.u8 r3, q4[10] -; CHECK-NEXT: vmov.16 q7[2], r3 -; CHECK-NEXT: vmov.u8 r3, q4[11] -; CHECK-NEXT: vmov.16 q7[3], r3 -; CHECK-NEXT: vmov.u8 r3, q4[12] -; CHECK-NEXT: vmov.16 q7[4], r3 -; CHECK-NEXT: vmov.u8 r3, q4[13] -; CHECK-NEXT: vmov.16 q7[5], r3 -; CHECK-NEXT: vmov.u8 r3, q4[14] -; CHECK-NEXT: vmov.16 q7[6], r3 -; CHECK-NEXT: vmov.u8 r3, q5[8] -; CHECK-NEXT: vmov.16 q1[0], r3 -; CHECK-NEXT: vmov.u8 r3, q5[9] -; CHECK-NEXT: vmov.16 q1[1], r3 -; CHECK-NEXT: vmov.u8 r3, q5[10] -; CHECK-NEXT: vmov.16 q1[2], r3 -; CHECK-NEXT: vmov.u8 r3, q5[11] -; CHECK-NEXT: vmov.16 q1[3], r3 -; CHECK-NEXT: vmov.u8 r3, q5[12] -; CHECK-NEXT: vmov.16 q1[4], r3 -; CHECK-NEXT: vmov.u8 r3, q5[13] -; CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov.u8 r3, q5[14] -; CHECK-NEXT: vmov.16 q1[6], r3 -; CHECK-NEXT: vmov.u8 r3, q5[15] -; CHECK-NEXT: vmov.16 q1[7], r3 -; CHECK-NEXT: vmov.u8 r3, q4[15] -; CHECK-NEXT: vmov.16 q7[7], r3 -; CHECK-NEXT: vmov.u8 r3, q3[8] -; CHECK-NEXT: vmullb.u8 q4, q7, q1 -; CHECK-NEXT: vmov.16 q1[0], r3 -; CHECK-NEXT: vmov.u8 r3, q3[9] -; CHECK-NEXT: vmov.16 q1[1], r3 -; CHECK-NEXT: vmov.u8 r3, q3[10] -; CHECK-NEXT: vmov.16 q1[2], r3 -; CHECK-NEXT: vmov.u8 r3, q3[11] -; CHECK-NEXT: vmov.16 q1[3], r3 -; CHECK-NEXT: vmov.u8 r3, q3[12] -; CHECK-NEXT: vmov.16 q1[4], r3 -; CHECK-NEXT: vmov.u8 r3, q3[13] -; CHECK-NEXT: vmov.16 q1[5], r3 -; CHECK-NEXT: vmov.u8 r3, q3[14] -; CHECK-NEXT: vmov.16 q1[6], r3 -; CHECK-NEXT: vmov.u8 r3, q3[15] -; CHECK-NEXT: vmov.16 q1[7], r3 -; CHECK-NEXT: vpt.i16 ne, q1, zr -; CHECK-NEXT: vaddt.i16 q6, q6, q4 -; CHECK-NEXT: vaddva.u16 r12, q6 -; CHECK-NEXT: le lr, .LBB26_2 -; CHECK-NEXT: b .LBB26_4 -; CHECK-NEXT: .LBB26_3: +; CHECK-NEXT: vldrb.u8 q0, [r0], #16 +; CHECK-NEXT: vldrb.u8 q1, [r1], #16 +; CHECK-NEXT: vmlava.u8 r12, q1, q0 +; CHECK-NEXT: letp lr, .LBB26_2 +; CHECK-NEXT: @ %bb.3: @ %for.cond.cleanup +; CHECK-NEXT: sxth.w r0, r12 +; CHECK-NEXT: pop {r7, pc} +; CHECK-NEXT: .LBB26_4: ; CHECK-NEXT: mov.w r12, #0 -; CHECK-NEXT: .LBB26_4: @ %for.cond.cleanup ; CHECK-NEXT: sxth.w r0, r12 -; CHECK-NEXT: add sp, #24 -; CHECK-NEXT: vpop {d8, d9, d10, d11, d12, d13, d14, d15} ; CHECK-NEXT: pop {r7, pc} entry: %cmp13.not = icmp eq i32 %n, 0 diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll index 67a0075..93e3b16 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mla.ll @@ -275,74 +275,7 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x, <16 x i8> %y) { ; CHECK-LABEL: add_v16i8_v16i16_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q2[7], 
r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmullb.u8 q2, q3, q2 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmullb.u8 q0, q1, q3 -; CHECK-NEXT: vadd.i16 q0, q0, q2 -; CHECK-NEXT: vaddv.u16 r0, q0 +; CHECK-NEXT: vmlav.u8 r0, q0, q1 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -356,74 +289,7 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x, <16 x i8> %y) { ; CHECK-LABEL: add_v16i8_v16i16_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vmullb.s8 q2, q3, q2 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: 
vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmullb.s8 q0, q1, q3 -; CHECK-NEXT: vadd.i16 q0, q0, q2 -; CHECK-NEXT: vaddv.u16 r0, q0 +; CHECK-NEXT: vmlav.s8 r0, q0, q1 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -1243,74 +1109,7 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, <16 x i8> %y, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmullb.u8 q2, q3, q2 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmullb.u8 q0, q1, q3 -; CHECK-NEXT: vadd.i16 q0, q0, q2 -; CHECK-NEXT: vaddva.u16 r0, q0 +; CHECK-NEXT: vmlava.u8 r0, q0, q1 ; CHECK-NEXT: uxth r0, r0 ; CHECK-NEXT: bx lr entry: @@ -1325,74 +1124,7 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, <16 x i8> %y, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_sext: ; CHECK: @ %bb.0: @ %entry 
-; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vmullb.s8 q2, q3, q2 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmullb.s8 q0, q1, q3 -; CHECK-NEXT: vadd.i16 q0, q0, q2 -; CHECK-NEXT: vaddva.u16 r0, q0 +; CHECK-NEXT: vmlava.s8 r0, q0, q1 ; CHECK-NEXT: sxth r0, r0 ; CHECK-NEXT: bx lr entry: diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll index 6510aff..f30856d 100644 --- a/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll +++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-mlapred.ll @@ -416,120 +416,9 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) { ; CHECK-LABEL: add_v16i8_v16i16_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vcmp.i8 eq, q2, zr -; CHECK-NEXT: vmov.i8 q2, #0x0 -; CHECK-NEXT: vmov.i8 q3, #0xff -; CHECK-NEXT: vpsel q2, q3, q2 -; CHECK-NEXT: vmov.u8 r0, q2[0] -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q2[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q2[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q2[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q2[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q2[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q2[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; 
CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vcmp.i16 ne, q3, zr -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmovlb.u8 q4, q3 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q2[8] -; CHECK-NEXT: vmovlb.u8 q5, q3 -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmult.i16 q3, q5, q4 -; CHECK-NEXT: vmov.16 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q2[9] -; CHECK-NEXT: vmov.16 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[10] -; CHECK-NEXT: vmov.16 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q2[11] -; CHECK-NEXT: vmov.16 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q2[12] -; CHECK-NEXT: vmov.16 q4[4], r0 -; CHECK-NEXT: vmov.u8 r0, q2[13] -; CHECK-NEXT: vmov.16 q4[5], r0 -; CHECK-NEXT: vmov.u8 r0, q2[14] -; CHECK-NEXT: vmov.16 q4[6], r0 -; CHECK-NEXT: vmov.u8 r0, q2[15] -; CHECK-NEXT: vmov.16 q4[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmullb.u8 q0, q1, q2 -; CHECK-NEXT: vpt.i16 ne, q4, zr -; CHECK-NEXT: vaddt.i16 q3, q3, q0 -; CHECK-NEXT: vaddv.u16 r0, q3 +; CHECK-NEXT: vpt.i8 eq, q2, zr +; CHECK-NEXT: vmlavt.u8 r0, q0, q1 ; CHECK-NEXT: uxth r0, r0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %c = icmp eq <16 x i8> %b, zeroinitializer @@ -544,120 +433,9 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b) { ; CHECK-LABEL: add_v16i8_v16i16_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vcmp.i8 eq, q2, zr -; CHECK-NEXT: vmov.i8 q2, #0x0 -; CHECK-NEXT: vmov.i8 q3, 
#0xff -; CHECK-NEXT: vpsel q2, q3, q2 -; CHECK-NEXT: vmov.u8 r0, q2[0] -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q2[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q2[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q2[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q2[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q2[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q2[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[0] -; CHECK-NEXT: vcmp.i16 ne, q3, zr -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[0] -; CHECK-NEXT: vmovlb.s8 q4, q3 -; CHECK-NEXT: vmov.16 q3[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[1] -; CHECK-NEXT: vmov.16 q3[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[2] -; CHECK-NEXT: vmov.16 q3[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[3] -; CHECK-NEXT: vmov.16 q3[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[4] -; CHECK-NEXT: vmov.16 q3[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[5] -; CHECK-NEXT: vmov.16 q3[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[6] -; CHECK-NEXT: vmov.16 q3[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[7] -; CHECK-NEXT: vmov.16 q3[7], r0 -; CHECK-NEXT: vmov.u8 r0, q2[8] -; CHECK-NEXT: vmovlb.s8 q5, q3 -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmult.i16 q3, q5, q4 -; CHECK-NEXT: vmov.16 q4[0], r0 -; CHECK-NEXT: vmov.u8 r0, q2[9] -; CHECK-NEXT: vmov.16 q4[1], r0 -; CHECK-NEXT: vmov.u8 r0, q2[10] -; CHECK-NEXT: vmov.16 q4[2], r0 -; CHECK-NEXT: vmov.u8 r0, q2[11] -; CHECK-NEXT: vmov.16 q4[3], r0 -; CHECK-NEXT: vmov.u8 r0, q2[12] -; CHECK-NEXT: vmov.16 q4[4], r0 -; CHECK-NEXT: vmov.u8 r0, q2[13] -; CHECK-NEXT: vmov.16 q4[5], r0 -; CHECK-NEXT: vmov.u8 r0, q2[14] -; CHECK-NEXT: vmov.16 q4[6], r0 -; CHECK-NEXT: vmov.u8 r0, q2[15] -; CHECK-NEXT: vmov.16 q4[7], r0 -; CHECK-NEXT: vmov.u8 r0, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r0 -; CHECK-NEXT: vmov.u8 r0, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r0 -; CHECK-NEXT: vmov.u8 r0, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r0 -; CHECK-NEXT: vmov.u8 r0, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r0 -; CHECK-NEXT: vmov.u8 r0, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r0 -; CHECK-NEXT: vmov.u8 r0, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r0 -; CHECK-NEXT: vmov.u8 r0, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r0 -; CHECK-NEXT: vmov.u8 r0, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r0 -; CHECK-NEXT: vmov.u8 r0, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r0 -; CHECK-NEXT: vmov.u8 r0, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r0 -; CHECK-NEXT: vmov.u8 r0, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r0 -; CHECK-NEXT: vmov.u8 r0, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r0 -; CHECK-NEXT: vmov.u8 r0, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r0 -; CHECK-NEXT: vmov.u8 r0, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r0 -; CHECK-NEXT: vmov.u8 r0, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r0 -; CHECK-NEXT: vmov.u8 r0, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r0 -; CHECK-NEXT: vmullb.s8 q0, q1, q2 -; CHECK-NEXT: vpt.i16 ne, q4, zr -; CHECK-NEXT: vaddt.i16 q3, q3, q0 -; CHECK-NEXT: vaddv.u16 r0, q3 +; 
CHECK-NEXT: vpt.i8 eq, q2, zr +; CHECK-NEXT: vmlavt.s8 r0, q0, q1 ; CHECK-NEXT: sxth r0, r0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %c = icmp eq <16 x i8> %b, zeroinitializer @@ -2084,120 +1862,9 @@ entry: define arm_aapcs_vfpcc zeroext i16 @add_v16i8_v16i16_acc_zext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_zext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vcmp.i8 eq, q2, zr -; CHECK-NEXT: vmov.i8 q2, #0x0 -; CHECK-NEXT: vmov.i8 q3, #0xff -; CHECK-NEXT: vpsel q2, q3, q2 -; CHECK-NEXT: vmov.u8 r1, q2[0] -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q2[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q2[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q2[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q2[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q2[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q2[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q2[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vcmp.i16 ne, q3, zr -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmovlb.u8 q4, q3 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q2[8] -; CHECK-NEXT: vmovlb.u8 q5, q3 -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmult.i16 q3, q5, q4 -; CHECK-NEXT: vmov.16 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q2[9] -; CHECK-NEXT: vmov.16 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q2[10] -; CHECK-NEXT: vmov.16 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q2[11] -; CHECK-NEXT: vmov.16 q4[3], r1 -; CHECK-NEXT: vmov.u8 r1, q2[12] -; CHECK-NEXT: vmov.16 q4[4], r1 -; CHECK-NEXT: vmov.u8 r1, q2[13] -; CHECK-NEXT: vmov.16 q4[5], r1 -; CHECK-NEXT: vmov.u8 r1, q2[14] -; CHECK-NEXT: vmov.16 q4[6], r1 -; CHECK-NEXT: vmov.u8 r1, q2[15] -; CHECK-NEXT: vmov.16 q4[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: 
vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmullb.u8 q0, q1, q2 -; CHECK-NEXT: vpt.i16 ne, q4, zr -; CHECK-NEXT: vaddt.i16 q3, q3, q0 -; CHECK-NEXT: vaddva.u16 r0, q3 +; CHECK-NEXT: vpt.i8 eq, q2, zr +; CHECK-NEXT: vmlavat.u8 r0, q0, q1 ; CHECK-NEXT: uxth r0, r0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %c = icmp eq <16 x i8> %b, zeroinitializer @@ -2213,120 +1880,9 @@ entry: define arm_aapcs_vfpcc signext i16 @add_v16i8_v16i16_acc_sext(<16 x i8> %x, <16 x i8> %y, <16 x i8> %b, i16 %a) { ; CHECK-LABEL: add_v16i8_v16i16_acc_sext: ; CHECK: @ %bb.0: @ %entry -; CHECK-NEXT: .vsave {d8, d9, d10, d11} -; CHECK-NEXT: vpush {d8, d9, d10, d11} -; CHECK-NEXT: vcmp.i8 eq, q2, zr -; CHECK-NEXT: vmov.i8 q2, #0x0 -; CHECK-NEXT: vmov.i8 q3, #0xff -; CHECK-NEXT: vpsel q2, q3, q2 -; CHECK-NEXT: vmov.u8 r1, q2[0] -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q2[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q2[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q2[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q2[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q2[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q2[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q2[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q1[0] -; CHECK-NEXT: vcmp.i16 ne, q3, zr -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[0] -; CHECK-NEXT: vmovlb.s8 q4, q3 -; CHECK-NEXT: vmov.16 q3[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[1] -; CHECK-NEXT: vmov.16 q3[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[2] -; CHECK-NEXT: vmov.16 q3[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[3] -; CHECK-NEXT: vmov.16 q3[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[4] -; CHECK-NEXT: vmov.16 q3[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[5] -; CHECK-NEXT: vmov.16 q3[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[6] -; CHECK-NEXT: vmov.16 q3[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[7] -; CHECK-NEXT: vmov.16 q3[7], r1 -; CHECK-NEXT: vmov.u8 r1, q2[8] -; CHECK-NEXT: vmovlb.s8 q5, q3 -; CHECK-NEXT: vmov.i32 q3, #0x0 -; CHECK-NEXT: vpst -; CHECK-NEXT: vmult.i16 q3, q5, q4 -; CHECK-NEXT: vmov.16 q4[0], r1 -; CHECK-NEXT: vmov.u8 r1, q2[9] -; CHECK-NEXT: vmov.16 q4[1], r1 -; CHECK-NEXT: vmov.u8 r1, q2[10] -; CHECK-NEXT: vmov.16 q4[2], r1 -; CHECK-NEXT: vmov.u8 r1, q2[11] -; CHECK-NEXT: vmov.16 q4[3], r1 -; CHECK-NEXT: vmov.u8 r1, q2[12] -; CHECK-NEXT: vmov.16 q4[4], r1 -; CHECK-NEXT: vmov.u8 r1, q2[13] -; CHECK-NEXT: vmov.16 q4[5], r1 -; CHECK-NEXT: vmov.u8 r1, q2[14] -; CHECK-NEXT: vmov.16 q4[6], r1 -; CHECK-NEXT: vmov.u8 r1, q2[15] -; CHECK-NEXT: vmov.16 q4[7], r1 -; CHECK-NEXT: vmov.u8 
r1, q1[8] -; CHECK-NEXT: vmov.16 q2[0], r1 -; CHECK-NEXT: vmov.u8 r1, q1[9] -; CHECK-NEXT: vmov.16 q2[1], r1 -; CHECK-NEXT: vmov.u8 r1, q1[10] -; CHECK-NEXT: vmov.16 q2[2], r1 -; CHECK-NEXT: vmov.u8 r1, q1[11] -; CHECK-NEXT: vmov.16 q2[3], r1 -; CHECK-NEXT: vmov.u8 r1, q1[12] -; CHECK-NEXT: vmov.16 q2[4], r1 -; CHECK-NEXT: vmov.u8 r1, q1[13] -; CHECK-NEXT: vmov.16 q2[5], r1 -; CHECK-NEXT: vmov.u8 r1, q1[14] -; CHECK-NEXT: vmov.16 q2[6], r1 -; CHECK-NEXT: vmov.u8 r1, q1[15] -; CHECK-NEXT: vmov.16 q2[7], r1 -; CHECK-NEXT: vmov.u8 r1, q0[8] -; CHECK-NEXT: vmov.16 q1[0], r1 -; CHECK-NEXT: vmov.u8 r1, q0[9] -; CHECK-NEXT: vmov.16 q1[1], r1 -; CHECK-NEXT: vmov.u8 r1, q0[10] -; CHECK-NEXT: vmov.16 q1[2], r1 -; CHECK-NEXT: vmov.u8 r1, q0[11] -; CHECK-NEXT: vmov.16 q1[3], r1 -; CHECK-NEXT: vmov.u8 r1, q0[12] -; CHECK-NEXT: vmov.16 q1[4], r1 -; CHECK-NEXT: vmov.u8 r1, q0[13] -; CHECK-NEXT: vmov.16 q1[5], r1 -; CHECK-NEXT: vmov.u8 r1, q0[14] -; CHECK-NEXT: vmov.16 q1[6], r1 -; CHECK-NEXT: vmov.u8 r1, q0[15] -; CHECK-NEXT: vmov.16 q1[7], r1 -; CHECK-NEXT: vmullb.s8 q0, q1, q2 -; CHECK-NEXT: vpt.i16 ne, q4, zr -; CHECK-NEXT: vaddt.i16 q3, q3, q0 -; CHECK-NEXT: vaddva.u16 r0, q3 +; CHECK-NEXT: vpt.i8 eq, q2, zr +; CHECK-NEXT: vmlavat.s8 r0, q0, q1 ; CHECK-NEXT: sxth r0, r0 -; CHECK-NEXT: vpop {d8, d9, d10, d11} ; CHECK-NEXT: bx lr entry: %c = icmp eq <16 x i8> %b, zeroinitializer -- 2.7.4
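For reference, a minimal sketch of the kind of IR this combine now handles, distilled from the add_v16i8_v16i16_* tests above. The function names here are illustrative only, and the intrinsic name assumes the llvm.experimental.vector.reduce.add naming in use at the time of this patch; check the .ll tests in the diff for the exact declarations. Per the updated CHECK lines, the first case now selects to a single "vaddv.u8 r0, q0" followed by "uxth r0, r0", and the second to "vmlav.u8 r0, q0, q1" followed by "uxth r0, r0", rather than widening each half to v8i16 first.

; vecreduce add of a zero-extended v16i8: expected to lower via an i32 VADDVu plus a truncate
define arm_aapcs_vfpcc zeroext i16 @example_vaddv_zext(<16 x i8> %x) {
entry:
  %xx = zext <16 x i8> %x to <16 x i16>
  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %xx)
  ret i16 %z
}

; vecreduce add of a widened multiply: expected to lower via an i32 VMLAVu plus a truncate
define arm_aapcs_vfpcc zeroext i16 @example_vmlav_zext(<16 x i8> %x, <16 x i8> %y) {
entry:
  %xx = zext <16 x i8> %x to <16 x i16>
  %yy = zext <16 x i8> %y to <16 x i16>
  %m = mul <16 x i16> %xx, %yy
  %z = call i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16> %m)
  ret i16 %z
}

declare i16 @llvm.experimental.vector.reduce.add.v16i16(<16 x i16>)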