From 1d6430b9e2b82ebb9a90632f3b39c892548528d6 Mon Sep 17 00:00:00 2001
From: Craig Topper <craig.topper@sifive.com>
Date: Tue, 3 May 2022 19:42:42 -0700
Subject: [PATCH] [RISCV] Update isLegalAddressingMode for RVV.

RVV instructions only support base register addressing.

Reviewed By: reames

Differential Revision: https://reviews.llvm.org/D124820
---
 llvm/lib/Target/RISCV/RISCVISelLowering.cpp | 4 ++++
 .../RISCV/rvv/fixed-vector-strided-load-store.ll | 27 ++++++++++------------
 2 files changed, 16 insertions(+), 15 deletions(-)

diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index ff63b22..2a99956 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1014,6 +1014,10 @@ bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
   if (AM.BaseGV)
     return false;
 
+  // RVV instructions only support register addressing.
+  if (Subtarget.hasVInstructions() && isa<VectorType>(Ty))
+    return AM.HasBaseReg && AM.Scale == 0 && !AM.BaseOffs;
+
   // Require a 12-bit signed offset.
   if (!isInt<12>(AM.BaseOffs))
     return false;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
index ca3b584..4828863 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-strided-load-store.ll
@@ -596,7 +596,6 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
 ;
 ; CHECK-ASM-LABEL: struct_gather:
 ; CHECK-ASM: # %bb.0: # %entry
-; CHECK-ASM-NEXT: addi a0, a0, 32
 ; CHECK-ASM-NEXT: addi a1, a1, 132
 ; CHECK-ASM-NEXT: li a2, 1024
 ; CHECK-ASM-NEXT: li a3, 16
@@ -606,13 +605,13 @@ define void @struct_gather(i32* noalias nocapture %A, %struct.foo* noalias nocap
 ; CHECK-ASM-NEXT: vsetivli zero, 8, e32, m1, ta, mu
 ; CHECK-ASM-NEXT: vlse32.v v8, (a4), a3
 ; CHECK-ASM-NEXT: vlse32.v v9, (a1), a3
-; CHECK-ASM-NEXT: addi a4, a0, -32
-; CHECK-ASM-NEXT: vle32.v v10, (a4)
-; CHECK-ASM-NEXT: vle32.v v11, (a0)
+; CHECK-ASM-NEXT: vle32.v v10, (a0)
+; CHECK-ASM-NEXT: addi a4, a0, 32
+; CHECK-ASM-NEXT: vle32.v v11, (a4)
 ; CHECK-ASM-NEXT: vadd.vv v8, v10, v8
 ; CHECK-ASM-NEXT: vadd.vv v9, v11, v9
-; CHECK-ASM-NEXT: vse32.v v8, (a4)
-; CHECK-ASM-NEXT: vse32.v v9, (a0)
+; CHECK-ASM-NEXT: vse32.v v8, (a0)
+; CHECK-ASM-NEXT: vse32.v v9, (a4)
 ; CHECK-ASM-NEXT: addi a2, a2, -16
 ; CHECK-ASM-NEXT: addi a0, a0, 64
 ; CHECK-ASM-NEXT: addi a1, a1, 256
@@ -838,17 +837,16 @@ define void @gather_of_pointers(i32** noalias nocapture %0, i32** noalias nocapt
 ;
 ; CHECK-ASM-LABEL: gather_of_pointers:
 ; CHECK-ASM: # %bb.0:
-; CHECK-ASM-NEXT: addi a0, a0, 16
 ; CHECK-ASM-NEXT: li a2, 1024
 ; CHECK-ASM-NEXT: li a3, 40
 ; CHECK-ASM-NEXT: .LBB10_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT: addi a4, a1, 80
 ; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu
 ; CHECK-ASM-NEXT: vlse64.v v8, (a1), a3
+; CHECK-ASM-NEXT: addi a4, a1, 80
 ; CHECK-ASM-NEXT: vlse64.v v9, (a4), a3
-; CHECK-ASM-NEXT: addi a4, a0, -16
-; CHECK-ASM-NEXT: vse64.v v8, (a4)
-; CHECK-ASM-NEXT: vse64.v v9, (a0)
+; CHECK-ASM-NEXT: vse64.v v8, (a0)
+; CHECK-ASM-NEXT: addi a4, a0, 16
+; CHECK-ASM-NEXT: vse64.v v9, (a4)
 ; CHECK-ASM-NEXT: addi a2, a2, -4
 ; CHECK-ASM-NEXT: addi a0, a0, 32
 ; CHECK-ASM-NEXT: addi a1, a1, 160
@@ -912,14 +910,13 @@ define void @scatter_of_pointers(i32** noalias nocapture %0, i32** noalias nocap
 ;
 ; CHECK-ASM-LABEL: scatter_of_pointers:
 ; CHECK-ASM: # %bb.0:
-; CHECK-ASM-NEXT: addi a1, a1, 16
 ; CHECK-ASM-NEXT: li a2, 1024
 ; CHECK-ASM-NEXT: li a3, 40
 ; CHECK-ASM-NEXT: .LBB11_1: # =>This Inner Loop Header: Depth=1
-; CHECK-ASM-NEXT: addi a4, a1, -16
 ; CHECK-ASM-NEXT: vsetivli zero, 2, e64, m1, ta, mu
-; CHECK-ASM-NEXT: vle64.v v8, (a4)
-; CHECK-ASM-NEXT: vle64.v v9, (a1)
+; CHECK-ASM-NEXT: vle64.v v8, (a1)
+; CHECK-ASM-NEXT: addi a4, a1, 16
+; CHECK-ASM-NEXT: vle64.v v9, (a4)
 ; CHECK-ASM-NEXT: addi a4, a0, 80
 ; CHECK-ASM-NEXT: vsse64.v v8, (a0), a3
 ; CHECK-ASM-NEXT: vsse64.v v9, (a4), a3
-- 
2.7.4