From ddd9485129b53a1ca53ebe06f59dd3c7fe2dc333 Mon Sep 17 00:00:00 2001
From: John Brawn <john.brawn@arm.com>
Date: Fri, 8 Jul 2022 11:12:38 +0100
Subject: [PATCH] [MVE] Don't distribute add of vecreduce if it has more
 than one use

If the add has more than one use then applying the transformation won't
cause it to be removed, so we can end up applying it again causing an
infinite loop.

Differential Revision: https://reviews.llvm.org/D129361
---
 llvm/lib/Target/ARM/ARMISelLowering.cpp              |   6 +-
 .../CodeGen/Thumb2/mve-vecreduce-add-combine.ll      | 103 +++++++++++++++++++++
 2 files changed, 106 insertions(+), 3 deletions(-)
 create mode 100644 llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll

diff --git a/llvm/lib/Target/ARM/ARMISelLowering.cpp b/llvm/lib/Target/ARM/ARMISelLowering.cpp
index b16e0d9..e6be93e 100644
--- a/llvm/lib/Target/ARM/ARMISelLowering.cpp
+++ b/llvm/lib/Target/ARM/ARMISelLowering.cpp
@@ -13358,14 +13358,14 @@ static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
   // to make better use of vaddva style instructions.
   if (VT == MVT::i32 && N1.getOpcode() == ISD::ADD && !IsVecReduce(N0) &&
       IsVecReduce(N1.getOperand(0)) && IsVecReduce(N1.getOperand(1)) &&
-      !isa<ConstantSDNode>(N0)) {
+      !isa<ConstantSDNode>(N0) && N1->hasOneUse()) {
     SDValue Add0 = DAG.getNode(ISD::ADD, dl, VT, N0, N1.getOperand(0));
     return DAG.getNode(ISD::ADD, dl, VT, Add0, N1.getOperand(1));
   }
   // And turn add(add(A, reduce(B)), add(C, reduce(D))) ->
   //   add(add(add(A, C), reduce(B)), reduce(D))
   if (VT == MVT::i32 && N0.getOpcode() == ISD::ADD &&
-      N1.getOpcode() == ISD::ADD) {
+      N1.getOpcode() == ISD::ADD && N0->hasOneUse() && N1->hasOneUse()) {
     unsigned N0RedOp = 0;
     if (!IsVecReduce(N0.getOperand(N0RedOp))) {
       N0RedOp = 1;
@@ -13432,7 +13432,7 @@ static SDValue TryDistrubutionADDVecReduce(SDNode *N, SelectionDAG &DAG) {
   };
 
   SDValue X;
-  if (N0.getOpcode() == ISD::ADD) {
+  if (N0.getOpcode() == ISD::ADD && N0->hasOneUse()) {
     if (IsVecReduce(N0.getOperand(0)) && IsVecReduce(N0.getOperand(1))) {
       int IsBefore = IsKnownOrderedLoad(N0.getOperand(0).getOperand(0),
                                         N0.getOperand(1).getOperand(0));
diff --git a/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
new file mode 100644
index 0000000..16abf16
--- /dev/null
+++ b/llvm/test/CodeGen/Thumb2/mve-vecreduce-add-combine.ll
@@ -0,0 +1,103 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=thumbv8.1m.main-none-none-eabi -mattr=+mve -verify-machineinstrs %s -o - | FileCheck %s --check-prefix=CHECK
+
+define arm_aapcs_vfpcc i32 @test1(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test1:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r2, r1
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %arg1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test2(ptr %ptr, i32 %arg1, <4 x i32> %arg2, <4 x i32> %arg3) {
+; CHECK-LABEL: test2:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vaddv.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r2, q0
+; CHECK-NEXT:    str r2, [r0]
+; CHECK-NEXT:    adds r0, r1, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg2)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %add1 = add i32 %reduce1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %arg1, %add1
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test3(ptr %ptr, i32 %arg1, i32 %arg2, <4 x i32> %arg3, <4 x i32> %arg4) {
+; CHECK-LABEL: test3:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r2, q1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    add.w r0, r12, r2
+; CHECK-NEXT:    bx lr
+entry:
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg3)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %arg4)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %arg2, %reduce2
+  %add3 = add i32 %add1, %add2
+  ret i32 %add3
+}
+
+define arm_aapcs_vfpcc i32 @test4(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test4:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, ptr %arg2, align 4
+  %gep = getelementptr inbounds i32, ptr %arg2, i32 1
+  %load2 = load <4 x i32>, ptr %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce1
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %reduce2
+  ret i32 %add2
+}
+
+define arm_aapcs_vfpcc i32 @test5(ptr %ptr, i32 %arg1, ptr %arg2) {
+; CHECK-LABEL: test5:
+; CHECK:       @ %bb.0: @ %entry
+; CHECK-NEXT:    vldrw.u32 q0, [r2, #4]
+; CHECK-NEXT:    mov r12, r1
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    vldrw.u32 q0, [r2]
+; CHECK-NEXT:    str.w r12, [r0]
+; CHECK-NEXT:    vaddva.u32 r12, q0
+; CHECK-NEXT:    mov r0, r12
+; CHECK-NEXT:    bx lr
+entry:
+  %load1 = load <4 x i32>, ptr %arg2, align 4
+  %gep = getelementptr inbounds i32, ptr %arg2, i32 1
+  %load2 = load <4 x i32>, ptr %gep, align 4
+  %reduce1 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load1)
+  %reduce2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %load2)
+  %add1 = add i32 %arg1, %reduce2
+  store i32 %add1, ptr %ptr, align 4
+  %add2 = add i32 %add1, %reduce1
+  ret i32 %add2
+}
+
+declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
-- 
2.7.4