; RV32-NEXT: .cfi_def_cfa_offset 144
; RV32-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: addi s0, sp, 144
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 4
; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a3, a0, a1
; RV32-NEXT: vmv8r.v v8, v0
; RV32-NEXT: vmv8r.v v16, v24
; RV32-NEXT: call ext2@plt
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
; RV32-NEXT: ret
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: addi s0, sp, 144
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 4
; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a3, a0, a1
; RV64-NEXT: vmv8r.v v8, v0
; RV64-NEXT: vmv8r.v v16, v24
; RV64-NEXT: call ext2@plt
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
-; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: ret
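; In the ext2 caller, both RV32 and RV64 now appear to set up a frame pointer
; (addi s0, sp, 144 with .cfi_def_cfa s0, 0), over-align sp to a 128-byte
; boundary (andi sp, sp, -128), and tear down the scalable part of the frame by
; restoring sp from s0 (addi sp, s0, -144) instead of re-deriving the
; vlenb-scaled size in the epilogue.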
; RV32-NEXT: .cfi_def_cfa_offset 144
; RV32-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: addi s0, sp, 144
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: li a3, 48
; RV32-NEXT: mul a1, a1, a3
; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: csrr a1, vlenb
; RV32-NEXT: slli a1, a1, 3
; RV32-NEXT: add a3, a2, a1
; RV32-NEXT: addi a1, a1, 128
; RV32-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
; RV32-NEXT: call ext3@plt
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: li a1, 48
-; RV32-NEXT: mul a0, a0, a1
-; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
; RV32-NEXT: ret
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: addi s0, sp, 144
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: li a3, 48
; RV64-NEXT: mul a1, a1, a3
; RV64-NEXT: sub sp, sp, a1
+; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: csrr a1, vlenb
; RV64-NEXT: slli a1, a1, 3
; RV64-NEXT: add a3, a2, a1
; RV64-NEXT: addi a1, a1, 128
; RV64-NEXT: vl8re8.v v16, (a1) # Unknown-size Folded Reload
; RV64-NEXT: call ext3@plt
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: li a1, 48
-; RV64-NEXT: mul a0, a0, a1
-; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: ret
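; The ext3 caller follows the same pattern: the old epilogue that rebuilt the
; 48*vlenb frame size is replaced by the single addi sp, s0, -144, while the
; folded reload of v16 is still addressed within the realigned scalable region.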
; RV32-NEXT: .cfi_def_cfa_offset 144
; RV32-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: addi s0, sp, 144
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 5
; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: addi a1, sp, 128
; RV32-NEXT: li a0, 0
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: call vector_arg_indirect_stack@plt
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 5
-; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
; RV32-NEXT: ret
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: addi s0, sp, 144
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 5
; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: addi a1, sp, 128
; RV64-NEXT: li a0, 0
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: call vector_arg_indirect_stack@plt
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 5
-; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: ret
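; For vector_arg_indirect_stack, the pointer for the indirectly passed vector
; argument appears to be formed at sp+128, inside the region carved out below
; the realigned sp; the epilogue again restores sp from s0.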
; RV32-NEXT: .cfi_def_cfa_offset 144
; RV32-NEXT: sw ra, 140(sp) # 4-byte Folded Spill
; RV32-NEXT: .cfi_offset ra, -4
+; RV32-NEXT: addi s0, sp, 144
+; RV32-NEXT: .cfi_def_cfa s0, 0
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 4
; RV32-NEXT: sub sp, sp, a0
+; RV32-NEXT: andi sp, sp, -128
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 3
; RV32-NEXT: addi a1, sp, 128
; RV32-NEXT: addi a0, sp, 128
; RV32-NEXT: vmv.v.i v16, 0
; RV32-NEXT: call callee_scalable_vector_split_indirect@plt
-; RV32-NEXT: csrr a0, vlenb
-; RV32-NEXT: slli a0, a0, 4
-; RV32-NEXT: add sp, sp, a0
+; RV32-NEXT: addi sp, s0, -144
; RV32-NEXT: lw ra, 140(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 144
; RV32-NEXT: ret
; RV64-NEXT: .cfi_def_cfa_offset 144
; RV64-NEXT: sd ra, 136(sp) # 8-byte Folded Spill
; RV64-NEXT: .cfi_offset ra, -8
+; RV64-NEXT: addi s0, sp, 144
+; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 4
; RV64-NEXT: sub sp, sp, a0
+; RV64-NEXT: andi sp, sp, -128
; RV64-NEXT: csrr a0, vlenb
; RV64-NEXT: slli a0, a0, 3
; RV64-NEXT: addi a1, sp, 128
; RV64-NEXT: addi a0, sp, 128
; RV64-NEXT: vmv.v.i v16, 0
; RV64-NEXT: call callee_scalable_vector_split_indirect@plt
-; RV64-NEXT: csrr a0, vlenb
-; RV64-NEXT: slli a0, a0, 4
-; RV64-NEXT: add sp, sp, a0
+; RV64-NEXT: addi sp, s0, -144
; RV64-NEXT: ld ra, 136(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 144
; RV64-NEXT: ret
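; The callee_scalable_vector_split_indirect caller likewise forms its argument
; pointers at an offset of 128 from the realigned sp and unwinds through s0.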
; CHECK-NEXT: .cfi_def_cfa_offset 80
; CHECK-NEXT: sd ra, 72(sp) # 8-byte Folded Spill
; CHECK-NEXT: .cfi_offset ra, -8
+; CHECK-NEXT: addi s0, sp, 80
+; CHECK-NEXT: .cfi_def_cfa s0, 0
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 4
; CHECK-NEXT: sub sp, sp, t0
-; CHECK-NEXT: addi t0, sp, 64
+; CHECK-NEXT: andi sp, sp, -64
+; CHECK-NEXT: mv s1, sp
+; CHECK-NEXT: addi t0, s1, 64
; CHECK-NEXT: sd t0, 8(sp)
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 3
-; CHECK-NEXT: add t0, sp, t0
+; CHECK-NEXT: add t0, s1, t0
; CHECK-NEXT: addi t0, t0, 64
; CHECK-NEXT: sd t0, 0(sp)
-; CHECK-NEXT: addi t0, sp, 64
+; CHECK-NEXT: addi t0, s1, 64
; CHECK-NEXT: vs8r.v v8, (t0)
; CHECK-NEXT: csrr t0, vlenb
; CHECK-NEXT: slli t0, t0, 3
-; CHECK-NEXT: add t0, sp, t0
+; CHECK-NEXT: add t0, s1, t0
; CHECK-NEXT: addi t0, t0, 64
; CHECK-NEXT: vs8r.v v8, (t0)
; CHECK-NEXT: vmv8r.v v16, v8
; CHECK-NEXT: call bar@plt
-; CHECK-NEXT: csrr a0, vlenb
-; CHECK-NEXT: slli a0, a0, 4
-; CHECK-NEXT: add sp, sp, a0
+; CHECK-NEXT: addi sp, s0, -80
; CHECK-NEXT: ld ra, 72(sp) # 8-byte Folded Reload
; CHECK-NEXT: addi sp, sp, 80
; CHECK-NEXT: ret
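; The bar caller additionally keeps a copy of the realigned sp in s1
; (mv s1, sp) and addresses the two vector stack objects at s1+64 and
; s1+64+8*vlenb, while the pointers passed on the stack are stored at 0(sp)
; and 8(sp); presumably s1 acts as a base pointer here so that sp remains
; usable for the outgoing arguments after realignment.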