assert(Amount != 0 && "Did not need to adjust stack pointer for RVV.");
const RISCVInstrInfo *TII = STI.getInstrInfo();
- Register SPReg = getSPReg(STI);
+ const Register SPReg = getSPReg(STI);
+
+ // Optimize the case where the offset is a compile-time constant.
+ if (STI.getRealMinVLen() == STI.getRealMaxVLen()) {
+ // 1. Multiply the number of v-slots by the (constant) length of a vector register.
+ const int64_t VLENB = STI.getRealMinVLen() / 8;
+ assert(Amount % 8 == 0 &&
+ "Reserve the stack by the multiple of one vector size.");
+ const int64_t NumOfVReg = Amount / 8;
+ const int64_t Offset = NumOfVReg * VLENB;
+ if (!isInt<32>(Offset)) {
+ report_fatal_error(
+ "Frame size outside of the signed 32-bit range not supported");
+ }
+ adjustReg(MBB, MBBI, DL, SPReg, SPReg, Offset, Flag);
+ return;
+ }
+
unsigned Opc = RISCV::ADD;
if (Amount < 0) {
Amount = -Amount;
; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: sd ra, 24(sp) # 8-byte Folded Spill
; SPILL-O2-VLEN128-NEXT: sd s0, 16(sp) # 8-byte Folded Spill
-; SPILL-O2-VLEN128-NEXT: csrr a1, vlenb
-; SPILL-O2-VLEN128-NEXT: slli a1, a1, 1
-; SPILL-O2-VLEN128-NEXT: sub sp, sp, a1
+; SPILL-O2-VLEN128-NEXT: addi sp, sp, -32
; SPILL-O2-VLEN128-NEXT: mv s0, a0
; SPILL-O2-VLEN128-NEXT: addi a1, sp, 16
; SPILL-O2-VLEN128-NEXT: vs1r.v v8, (a1) # Unknown-size Folded Spill
; SPILL-O2-VLEN128-NEXT: addi a0, sp, 16
; SPILL-O2-VLEN128-NEXT: vl1r.v v9, (a0) # Unknown-size Folded Reload
; SPILL-O2-VLEN128-NEXT: vfadd.vv v8, v9, v8
-; SPILL-O2-VLEN128-NEXT: csrr a0, vlenb
-; SPILL-O2-VLEN128-NEXT: slli a0, a0, 1
-; SPILL-O2-VLEN128-NEXT: add sp, sp, a0
+; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32
; SPILL-O2-VLEN128-NEXT: ld ra, 24(sp) # 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: ld s0, 16(sp) # 8-byte Folded Reload
; SPILL-O2-VLEN128-NEXT: addi sp, sp, 32