  MachineRegisterInfo &MRI = MF.getRegInfo();
-  // VL and VTYPE are alive here.
-  MachineInstrBuilder MIB = BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI));
+  auto BuildVSETVLI = [&]() {
+    if (VLIndex >= 0) {
+      Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
+      Register VLReg = MI.getOperand(VLIndex).getReg();
+
+      // VL might be a compile time constant, but isel would have to put it
+      // in a register. See if VL comes from an ADDI X0, imm.
+      if (VLReg.isVirtual()) {
+        MachineInstr *Def = MRI.getVRegDef(VLReg);
+        if (Def && Def->getOpcode() == RISCV::ADDI &&
+            Def->getOperand(1).getReg() == RISCV::X0 &&
+            Def->getOperand(2).isImm()) {
+          uint64_t Imm = Def->getOperand(2).getImm();
+          // VSETIVLI allows a 5-bit zero extended immediate.
+          if (isUInt<5>(Imm))
+            return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETIVLI))
+                .addReg(DestReg, RegState::Define | RegState::Dead)
+                .addImm(Imm);
+        }
+      }
+
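+      // Set VL (rs1 != X0).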
+      return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
+          .addReg(DestReg, RegState::Define | RegState::Dead)
+          .addReg(VLReg);
+    }
-  if (VLIndex >= 0) {
-    // Set VL (rs1 != X0).
-    Register DestReg = MRI.createVirtualRegister(&RISCV::GPRRegClass);
-    MIB.addReg(DestReg, RegState::Define | RegState::Dead)
-        .addReg(MI.getOperand(VLIndex).getReg());
-  } else
    // With no VL operator in the pseudo, do not modify VL (rd = X0, rs1 = X0).
-    MIB.addReg(RISCV::X0, RegState::Define | RegState::Dead)
+    return BuildMI(*BB, MI, DL, TII.get(RISCV::PseudoVSETVLI))
+        .addReg(RISCV::X0, RegState::Define | RegState::Dead)
        .addReg(RISCV::X0, RegState::Kill);
+  };
+
+  MachineInstrBuilder MIB = BuildVSETVLI();
  // Default to tail agnostic unless the destination is tied to a source. In
  // that case the user would have some control over the tail values. The tail
define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
; CHECK-LABEL: extractelt_nxv2f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
; CHECK-LABEL: extractelt_nxv4f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define half @extractelt_nxv1f16_imm(<vscale x 1 x half> %v) {
; CHECK-LABEL: extractelt_nxv1f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv1f16_idx(<vscale x 1 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv2f16_imm(<vscale x 2 x half> %v) {
; CHECK-LABEL: extractelt_nxv2f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv2f16_idx(<vscale x 2 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv4f16_imm(<vscale x 4 x half> %v) {
; CHECK-LABEL: extractelt_nxv4f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv4f16_idx(<vscale x 4 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define half @extractelt_nxv8f16_imm(<vscale x 8 x half> %v) {
; CHECK-LABEL: extractelt_nxv8f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define half @extractelt_nxv8f16_idx(<vscale x 8 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define half @extractelt_nxv16f16_imm(<vscale x 16 x half> %v) {
; CHECK-LABEL: extractelt_nxv16f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define half @extractelt_nxv16f16_idx(<vscale x 16 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv16f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define half @extractelt_nxv32f16_imm(<vscale x 32 x half> %v) {
; CHECK-LABEL: extractelt_nxv32f16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define half @extractelt_nxv32f16_idx(<vscale x 32 x half> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv32f16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define float @extractelt_nxv1f32_imm(<vscale x 1 x float> %v) {
; CHECK-LABEL: extractelt_nxv1f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv1f32_idx(<vscale x 1 x float> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv2f32_imm(<vscale x 2 x float> %v) {
; CHECK-LABEL: extractelt_nxv2f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv2f32_idx(<vscale x 2 x float> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define float @extractelt_nxv4f32_imm(<vscale x 4 x float> %v) {
; CHECK-LABEL: extractelt_nxv4f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define float @extractelt_nxv4f32_idx(<vscale x 4 x float> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define float @extractelt_nxv8f32_imm(<vscale x 8 x float> %v) {
; CHECK-LABEL: extractelt_nxv8f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define float @extractelt_nxv8f32_idx(<vscale x 8 x float> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define float @extractelt_nxv16f32_imm(<vscale x 16 x float> %v) {
; CHECK-LABEL: extractelt_nxv16f32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define float @extractelt_nxv16f32_idx(<vscale x 16 x float> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv16f32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define double @extractelt_nxv1f64_imm(<vscale x 1 x double> %v) {
; CHECK-LABEL: extractelt_nxv1f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define double @extractelt_nxv1f64_idx(<vscale x 1 x double> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v25
define double @extractelt_nxv2f64_imm(<vscale x 2 x double> %v) {
; CHECK-LABEL: extractelt_nxv2f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define double @extractelt_nxv2f64_idx(<vscale x 2 x double> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v26
define double @extractelt_nxv4f64_imm(<vscale x 4 x double> %v) {
; CHECK-LABEL: extractelt_nxv4f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define double @extractelt_nxv4f64_idx(<vscale x 4 x double> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v28
define double @extractelt_nxv8f64_imm(<vscale x 8 x double> %v) {
; CHECK-LABEL: extractelt_nxv8f64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define double @extractelt_nxv8f64_idx(<vscale x 8 x double> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8f64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vfmv.f.s fa0, v8
define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv64i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv32i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv16i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv1i64_0(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v8, a1
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv1i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
define i64 @extractelt_nxv2i64_0(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
-; CHECK-NEXT: vsrl.vx v26, v8, a1
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
+; CHECK-NEXT: vsrl.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a1, v26
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT: vsrl.vx v26, v26, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m2,ta,mu
+; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a1, v26
; CHECK-NEXT: ret
define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv2i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
-; CHECK-NEXT: vsrl.vx v26, v26, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m2,ta,mu
+; CHECK-NEXT: vsrl.vx v26, v26, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a1, v26
; CHECK-NEXT: ret
define i64 @extractelt_nxv4i64_0(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
-; CHECK-NEXT: vsrl.vx v28, v8, a1
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
+; CHECK-NEXT: vsrl.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a1, v28
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT: vsrl.vx v28, v28, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m4,ta,mu
+; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a1, v28
; CHECK-NEXT: ret
define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv4i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a2, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
-; CHECK-NEXT: vsrl.vx v28, v28, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m4,ta,mu
+; CHECK-NEXT: vsrl.vx v28, v28, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a1, v28
; CHECK-NEXT: ret
define i64 @extractelt_nxv8i64_0(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: addi a1, zero, 32
-; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
-; CHECK-NEXT: vsrl.vx v16, v8, a1
+; CHECK-NEXT: addi a0, zero, 32
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
+; CHECK-NEXT: vsrl.vx v16, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a1, v16
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a0, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
-; CHECK-NEXT: vsrl.vx v8, v8, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m8,ta,mu
+; CHECK-NEXT: vsrl.vx v8, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a1, v8
; CHECK-NEXT: ret
define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 %idx) {
; CHECK-LABEL: extractelt_nxv8i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a2, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
-; CHECK-NEXT: vsrl.vx v8, v8, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m8,ta,mu
+; CHECK-NEXT: vsrl.vx v8, v8, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a1, v8
; CHECK-NEXT: ret
define signext i8 @extractelt_nxv1i8_imm(<vscale x 1 x i8> %v) {
; CHECK-LABEL: extractelt_nxv1i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv1i8_idx(<vscale x 1 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf8,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv2i8_imm(<vscale x 2 x i8> %v) {
; CHECK-LABEL: extractelt_nxv2i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv2i8_idx(<vscale x 2 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv4i8_imm(<vscale x 4 x i8> %v) {
; CHECK-LABEL: extractelt_nxv4i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv4i8_idx(<vscale x 4 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv8i8_imm(<vscale x 8 x i8> %v) {
; CHECK-LABEL: extractelt_nxv8i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv8i8_idx(<vscale x 8 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i8 @extractelt_nxv16i8_imm(<vscale x 16 x i8> %v) {
; CHECK-LABEL: extractelt_nxv16i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i8 @extractelt_nxv16i8_idx(<vscale x 16 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv16i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i8 @extractelt_nxv32i8_imm(<vscale x 32 x i8> %v) {
; CHECK-LABEL: extractelt_nxv32i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i8 @extractelt_nxv32i8_idx(<vscale x 32 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv32i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i8 @extractelt_nxv64i8_imm(<vscale x 64 x i8> %v) {
; CHECK-LABEL: extractelt_nxv64i8_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i8 @extractelt_nxv64i8_idx(<vscale x 64 x i8> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv64i8_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e8,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i16 @extractelt_nxv1i16_imm(<vscale x 1 x i16> %v) {
; CHECK-LABEL: extractelt_nxv1i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv1i16_idx(<vscale x 1 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf4,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv2i16_imm(<vscale x 2 x i16> %v) {
; CHECK-LABEL: extractelt_nxv2i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv2i16_idx(<vscale x 2 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv4i16_imm(<vscale x 4 x i16> %v) {
; CHECK-LABEL: extractelt_nxv4i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv4i16_idx(<vscale x 4 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i16 @extractelt_nxv8i16_imm(<vscale x 8 x i16> %v) {
; CHECK-LABEL: extractelt_nxv8i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i16 @extractelt_nxv8i16_idx(<vscale x 8 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i16 @extractelt_nxv16i16_imm(<vscale x 16 x i16> %v) {
; CHECK-LABEL: extractelt_nxv16i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i16 @extractelt_nxv16i16_idx(<vscale x 16 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv16i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i16 @extractelt_nxv32i16_imm(<vscale x 32 x i16> %v) {
; CHECK-LABEL: extractelt_nxv32i16_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i16 @extractelt_nxv32i16_idx(<vscale x 32 x i16> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv32i16_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e16,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e16,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i32 @extractelt_nxv1i32_imm(<vscale x 1 x i32> %v) {
; CHECK-LABEL: extractelt_nxv1i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i32 @extractelt_nxv1i32_idx(<vscale x 1 x i32> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,mf2,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,mf2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i32 @extractelt_nxv2i32_imm(<vscale x 2 x i32> %v) {
; CHECK-LABEL: extractelt_nxv2i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i32 @extractelt_nxv2i32_idx(<vscale x 2 x i32> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define signext i32 @extractelt_nxv4i32_imm(<vscale x 4 x i32> %v) {
; CHECK-LABEL: extractelt_nxv4i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i32 @extractelt_nxv4i32_idx(<vscale x 4 x i32> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define signext i32 @extractelt_nxv8i32_imm(<vscale x 8 x i32> %v) {
; CHECK-LABEL: extractelt_nxv8i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i32 @extractelt_nxv8i32_idx(<vscale x 8 x i32> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define signext i32 @extractelt_nxv16i32_imm(<vscale x 16 x i32> %v) {
; CHECK-LABEL: extractelt_nxv16i32_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define signext i32 @extractelt_nxv16i32_idx(<vscale x 16 x i32> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv16i32_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e32,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e32,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv1i64_imm(<vscale x 1 x i64> %v) {
; CHECK-LABEL: extractelt_nxv1i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vi v25, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i64 @extractelt_nxv1i64_idx(<vscale x 1 x i64> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv1i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m1,ta,mu
; CHECK-NEXT: vslidedown.vx v25, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
define i64 @extractelt_nxv2i64_imm(<vscale x 2 x i64> %v) {
; CHECK-LABEL: extractelt_nxv2i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vi v26, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define i64 @extractelt_nxv2i64_idx(<vscale x 2 x i64> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv2i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; CHECK-NEXT: vslidedown.vx v26, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; CHECK-NEXT: vmv.x.s a0, v26
define i64 @extractelt_nxv4i64_imm(<vscale x 4 x i64> %v) {
; CHECK-LABEL: extractelt_nxv4i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vi v28, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define i64 @extractelt_nxv4i64_idx(<vscale x 4 x i64> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv4i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m4,ta,mu
; CHECK-NEXT: vslidedown.vx v28, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vmv.x.s a0, v28
define i64 @extractelt_nxv8i64_imm(<vscale x 8 x i64> %v) {
; CHECK-LABEL: extractelt_nxv8i64_imm:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a0, zero, 1
-; CHECK-NEXT: vsetvli a0, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a0, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vi v8, v8, 2
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i64 @extractelt_nxv8i64_idx(<vscale x 8 x i64> %v, i32 signext %idx) {
; CHECK-LABEL: extractelt_nxv8i64_idx:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e64,m8,ta,mu
; CHECK-NEXT: vslidedown.vx v8, v8, a0
; CHECK-NEXT: vsetvli zero, zero, e64,m8,ta,mu
; CHECK-NEXT: vmv.x.s a0, v8
define i8 @extractelt_v16i8(<16 x i8>* %x) nounwind {
; RV32-LABEL: extractelt_v16i8:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 16
-; RV32-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; RV32-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; RV32-NEXT: vle8.v v25, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e8,m1,ta,mu
; RV32-NEXT: vslidedown.vi v25, v25, 7
; RV32-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; RV32-NEXT: vmv.x.s a0, v25
;
; RV64-LABEL: extractelt_v16i8:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 16
-; RV64-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; RV64-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; RV64-NEXT: vle8.v v25, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e8,m1,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e8,m1,ta,mu
; RV64-NEXT: vslidedown.vi v25, v25, 7
; RV64-NEXT: vsetvli zero, zero, e8,m1,ta,mu
; RV64-NEXT: vmv.x.s a0, v25
define i16 @extractelt_v8i16(<8 x i16>* %x) nounwind {
; RV32-LABEL: extractelt_v8i16:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 8
-; RV32-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; RV32-NEXT: vle16.v v25, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; RV32-NEXT: vslidedown.vi v25, v25, 7
; RV32-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; RV32-NEXT: vmv.x.s a0, v25
;
; RV64-LABEL: extractelt_v8i16:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 8
-; RV64-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; RV64-NEXT: vle16.v v25, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e16,m1,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e16,m1,ta,mu
; RV64-NEXT: vslidedown.vi v25, v25, 7
; RV64-NEXT: vsetvli zero, zero, e16,m1,ta,mu
; RV64-NEXT: vmv.x.s a0, v25
define i32 @extractelt_v4i32(<4 x i32>* %x) nounwind {
; RV32-LABEL: extractelt_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 4
-; RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; RV32-NEXT: vle32.v v25, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; RV32-NEXT: vslidedown.vi v25, v25, 2
; RV32-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; RV32-NEXT: vmv.x.s a0, v25
;
; RV64-LABEL: extractelt_v4i32:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 4
-; RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; RV64-NEXT: vle32.v v25, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e32,m1,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e32,m1,ta,mu
; RV64-NEXT: vslidedown.vi v25, v25, 2
; RV64-NEXT: vsetvli zero, zero, e32,m1,ta,mu
; RV64-NEXT: vmv.x.s a0, v25
define i64 @extractelt_v2i64(<2 x i64>* %x) nounwind {
; RV32-LABEL: extractelt_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 2
-; RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; RV32-NEXT: vle64.v v25, (a0)
; RV32-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV32-NEXT: vmv.x.s a0, v25
-; RV32-NEXT: addi a1, zero, 1
-; RV32-NEXT: addi a2, zero, 32
-; RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; RV32-NEXT: vsrl.vx v25, v25, a2
+; RV32-NEXT: addi a1, zero, 32
+; RV32-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; RV32-NEXT: vsrl.vx v25, v25, a1
; RV32-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV32-NEXT: vmv.x.s a1, v25
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 2
-; RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; RV64-NEXT: vle64.v v25, (a0)
; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vmv.x.s a0, v25
; RV32-NEXT: addi a1, zero, 32
; RV32-NEXT: vsetvli a1, a1, e8,m2,ta,mu
; RV32-NEXT: vle8.v v26, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e8,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 7
; RV32-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v26
; RV64-NEXT: addi a1, zero, 32
; RV64-NEXT: vsetvli a1, a1, e8,m2,ta,mu
; RV64-NEXT: vle8.v v26, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e8,m2,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e8,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 7
; RV64-NEXT: vsetvli zero, zero, e8,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v26
define i16 @extractelt_v16i16(<16 x i16>* %x) nounwind {
; RV32-LABEL: extractelt_v16i16:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 16
-; RV32-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; RV32-NEXT: vle16.v v26, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 7
; RV32-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v26
;
; RV64-LABEL: extractelt_v16i16:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 16
-; RV64-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; RV64-NEXT: vle16.v v26, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e16,m2,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e16,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 7
; RV64-NEXT: vsetvli zero, zero, e16,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v26
define i32 @extractelt_v8i32(<8 x i32>* %x) nounwind {
; RV32-LABEL: extractelt_v8i32:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 8
-; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (a0)
-; RV32-NEXT: addi a0, zero, 1
-; RV32-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 6
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v26
;
; RV64-LABEL: extractelt_v8i32:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 8
-; RV64-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; RV64-NEXT: vle32.v v26, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e32,m2,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 6
; RV64-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v26
define i64 @extractelt_v4i64(<4 x i64>* %x) nounwind {
; RV32-LABEL: extractelt_v4i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 4
-; RV32-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; RV32-NEXT: vle64.v v26, (a0)
-; RV32-NEXT: addi a1, zero, 1
-; RV32-NEXT: vsetvli a0, a1, e64,m2,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 3
; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v26
-; RV32-NEXT: addi a2, zero, 32
-; RV32-NEXT: vsetvli a1, a1, e64,m2,ta,mu
-; RV32-NEXT: vsrl.vx v26, v26, a2
+; RV32-NEXT: addi a1, zero, 32
+; RV32-NEXT: vsetivli a2, 1, e64,m2,ta,mu
+; RV32-NEXT: vsrl.vx v26, v26, a1
; RV32-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV32-NEXT: vmv.x.s a1, v26
; RV32-NEXT: ret
;
; RV64-LABEL: extractelt_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 4
-; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 3
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v26
define i64 @extractelt_v3i64(<3 x i64>* %x) nounwind {
; RV32-LABEL: extractelt_v3i64:
; RV32: # %bb.0:
-; RV32-NEXT: addi a1, zero, 8
-; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (a0)
-; RV32-NEXT: addi a1, zero, 1
-; RV32-NEXT: vsetvli a0, a1, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a0, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v28, v26, 4
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV32-NEXT: vmv.x.s a0, v28
-; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 5
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
; RV32-NEXT: vmv.x.s a1, v26
;
; RV64-LABEL: extractelt_v3i64:
; RV64: # %bb.0:
-; RV64-NEXT: addi a1, zero, 4
-; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (a0)
-; RV64-NEXT: addi a0, zero, 1
-; RV64-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a0, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 2
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vmv.x.s a0, v26
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI0_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI0_0)
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a1)
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @fcmp_oeq_vv_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmfeq.vv v27, v25, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_oeq_vv_v8f16_nonans(<8 x half>* %x, <8 x half>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_vv_v8f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmfeq.vv v27, v25, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_une_vv_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a4, a3, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmfne.vv v27, v25, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_une_vv_v4f32_nonans(<4 x float>* %x, <4 x float>* %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_vv_v4f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a4, a3, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmfne.vv v27, v25, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_ogt_vv_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 2
-; CHECK-NEXT: vsetvli a4, a3, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmflt.vv v27, v26, v25
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_ogt_vv_v2f64_nonans(<2 x double>* %x, <2 x double>* %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_vv_v2f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 2
-; CHECK-NEXT: vsetvli a4, a3, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmflt.vv v27, v26, v25
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v27, (a2)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_olt_vv_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vle16.v v28, (a1)
; CHECK-NEXT: vmflt.vv v25, v26, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_olt_vv_v16f16_nonans(<16 x half>* %x, <16 x half>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_vv_v16f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vle16.v v28, (a1)
; CHECK-NEXT: vmflt.vv v25, v26, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_oge_vv_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
; CHECK-NEXT: vmfle.vv v25, v28, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_oge_vv_v8f32_nonans(<8 x float>* %x, <8 x float>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_vv_v8f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
; CHECK-NEXT: vmfle.vv v25, v28, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_ole_vv_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a4, a3, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vle64.v v28, (a1)
; CHECK-NEXT: vmfle.vv v25, v26, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_ole_vv_v4f64_nonans(<4 x double>* %x, <4 x double>* %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_vv_v4f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a4, a3, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vle64.v v28, (a1)
; CHECK-NEXT: vmfle.vv v25, v26, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_uge_vv_v16f32(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmflt.vv v25, v28, v8
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
define void @fcmp_uge_vv_v16f32_nonans(<16 x float>* %x, <16 x float>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_vv_v16f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmfle.vv v25, v8, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
define void @fcmp_ult_vv_v8f64(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmfle.vv v25, v8, v28
-; CHECK-NEXT: vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,mf2,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_ult_vv_v8f64_nonans(<8 x double>* %x, <8 x double>* %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_vv_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmflt.vv v25, v28, v8
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_one_vv_v8f64(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: vmflt.vv v25, v16, v8
; CHECK-NEXT: vmflt.vv v26, v8, v16
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v25, v26, v25
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
define void @fcmp_one_vv_v8f64_nonans(<16 x double>* %x, <16 x double>* %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_vv_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vle64.v v16, (a1)
; CHECK-NEXT: vmfne.vv v25, v8, v16
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <16 x double>, <16 x double>* %x
define void @fcmp_ord_vv_v4f16(<4 x half>* %x, <4 x half>* %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ord_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a1)
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmfeq.vv v27, v25, v25
; CHECK-NEXT: vmfeq.vv v25, v26, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,mf2,ta,mu
; CHECK-NEXT: vmand.mm v25, v25, v27
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <4 x half>, <4 x half>* %x
define void @fcmp_uno_vv_v4f16(<2 x half>* %x, <2 x half>* %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_uno_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 2
-; CHECK-NEXT: vsetvli a4, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 2, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a1)
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmfne.vv v27, v25, v25
; CHECK-NEXT: vmfne.vv v25, v26, v26
-; CHECK-NEXT: vsetvli a0, a3, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,mf2,ta,mu
; CHECK-NEXT: vmor.mm v25, v25, v27
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a2)
; CHECK-NEXT: ret
%a = load <2 x half>, <2 x half>* %x
define void @fcmp_oeq_vf_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmfeq.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_oeq_vf_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_vf_v8f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmfeq.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_une_vf_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmfne.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_une_vf_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_vf_v4f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmfne.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_ogt_vf_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vmfgt.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_ogt_vf_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_vf_v2f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vmfgt.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_olt_vf_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmflt.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_olt_vf_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_vf_v16f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmflt.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_oge_vf_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vmfge.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_oge_vf_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_vf_v8f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vmfge.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_ole_vf_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vmfle.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_ole_vf_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_vf_v4f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vmfle.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_uge_vf_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vmflt.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @fcmp_uge_vf_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_vf_v16f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vmfge.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
define void @fcmp_ult_vf_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vmfge.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,mf2,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_ult_vf_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_vf_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vmflt.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_one_vf_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfgt.vf v25, v8, fa0
; CHECK-NEXT: vmflt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v25, v26, v25
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @fcmp_one_vf_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_vf_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfne.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x double>, <16 x double>* %x
define void @fcmp_ord_vf_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ord_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfeq.vf v27, v26, fa0
; CHECK-NEXT: vmfeq.vv v26, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,mf2,ta,mu
; CHECK-NEXT: vmand.mm v25, v26, v27
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x half>, <4 x half>* %x
define void @fcmp_uno_vf_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_uno_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfne.vf v27, v26, fa0
; CHECK-NEXT: vmfne.vv v26, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,mf2,ta,mu
; CHECK-NEXT: vmor.mm v25, v26, v27
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <2 x half>, <2 x half>* %x
define void @fcmp_oeq_fv_v8f16(<8 x half>* %x, half %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmfeq.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_oeq_fv_v8f16_nonans(<8 x half>* %x, half %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oeq_fv_v8f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmfeq.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <8 x half>, <8 x half>* %x
define void @fcmp_une_fv_v4f32(<4 x float>* %x, float %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmfne.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_une_fv_v4f32_nonans(<4 x float>* %x, float %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_une_fv_v4f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmfne.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <4 x float>, <4 x float>* %x
define void @fcmp_ogt_fv_v2f64(<2 x double>* %x, double %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vmflt.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_ogt_fv_v2f64_nonans(<2 x double>* %x, double %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_ogt_fv_v2f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vmflt.vf v26, v25, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v26, (a1)
; CHECK-NEXT: ret
%a = load <2 x double>, <2 x double>* %x
define void @fcmp_olt_fv_v16f16(<16 x half>* %x, half %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_fv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmfgt.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_olt_fv_v16f16_nonans(<16 x half>* %x, half %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_olt_fv_v16f16_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vmfgt.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x half>, <16 x half>* %x
define void @fcmp_oge_fv_v8f32(<8 x float>* %x, float %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_fv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vmfle.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_oge_fv_v8f32_nonans(<8 x float>* %x, float %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_oge_fv_v8f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vmfle.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x float>, <8 x float>* %x
define void @fcmp_ole_fv_v4f64(<4 x double>* %x, double %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_fv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vmfge.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_ole_fv_v4f64_nonans(<4 x double>* %x, double %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ole_fv_v4f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vmfge.vf v25, v26, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x double>, <4 x double>* %x
define void @fcmp_uge_fv_v16f32(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_fv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vmfgt.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @fcmp_uge_fv_v16f32_nonans(<16 x float>* %x, float %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_uge_fv_v16f32_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e32,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e32,m4,ta,mu
; CHECK-NEXT: vle32.v v28, (a0)
; CHECK-NEXT: vmfle.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x float>, <16 x float>* %x
define void @fcmp_ult_fv_v8f64(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_fv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vmfle.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,mf2,ta,mu
; CHECK-NEXT: vmnand.mm v25, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_ult_fv_v8f64_nonans(<8 x double>* %x, double %y, <8 x i1>* %z) {
; CHECK-LABEL: fcmp_ult_fv_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: vmfgt.vf v25, v28, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <8 x double>, <8 x double>* %x
define void @fcmp_one_fv_v8f64(<16 x double>* %x, double %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_fv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmflt.vf v25, v8, fa0
; CHECK-NEXT: vmfgt.vf v26, v8, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vmor.mm v25, v26, v25
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @fcmp_one_fv_v8f64_nonans(<16 x double>* %x, double %y, <16 x i1>* %z) {
; CHECK-LABEL: fcmp_one_fv_v8f64_nonans:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a3, a2, e64,m8,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e64,m8,ta,mu
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: vmfne.vf v25, v8, fa0
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <16 x double>, <16 x double>* %x
define void @fcmp_ord_fv_v4f16(<4 x half>* %x, half %y, <4 x i1>* %z) {
; CHECK-LABEL: fcmp_ord_fv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfeq.vf v27, v26, fa0
; CHECK-NEXT: vmfeq.vv v26, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,mf2,ta,mu
; CHECK-NEXT: vmand.mm v25, v27, v26
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 4, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <4 x half>, <4 x half>* %x
define void @fcmp_uno_fv_v4f16(<2 x half>* %x, half %y, <2 x i1>* %z) {
; CHECK-LABEL: fcmp_uno_fv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a3, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmv.v.f v26, fa0
; CHECK-NEXT: vmfne.vf v27, v26, fa0
; CHECK-NEXT: vmfne.vv v26, v25, v25
-; CHECK-NEXT: vsetvli a0, a2, e8,mf2,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,mf2,ta,mu
; CHECK-NEXT: vmor.mm v25, v27, v26
-; CHECK-NEXT: vsetvli a0, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 2, e8,m1,ta,mu
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
%a = load <2 x half>, <2 x half>* %x
define void @splat_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: splat_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: splat_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: splat_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_16f16(<16 x half>* %x, half %y) {
; LMULMAX2-LABEL: splat_16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_16f16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_v8f32(<8 x float>* %x, float %y) {
; LMULMAX2-LABEL: splat_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v8f32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_v4f64(<4 x double>* %x, double %y) {
; LMULMAX2-LABEL: splat_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vfmv.v.f v26, fa0
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v4f64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vfmv.v.f v25, fa0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
define void @splat_zero_v8f16(<8 x half>* %x) {
; CHECK-LABEL: splat_zero_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v4f32(<4 x float>* %x) {
; CHECK-LABEL: splat_zero_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v2f64(<2 x double>* %x) {
; CHECK-LABEL: splat_zero_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_16f16(<16 x half>* %x) {
; LMULMAX2-LABEL: splat_zero_16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_16f16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_zero_v8f32(<8 x float>* %x) {
; LMULMAX2-LABEL: splat_zero_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v8f32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_zero_v4f64(<4 x double>* %x) {
; LMULMAX2-LABEL: splat_zero_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v4f64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
define void @gather_const_v8f16(<8 x half>* %x) {
; CHECK-LABEL: gather_const_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 5
; CHECK-NEXT: vse16.v v26, (a0)
define void @gather_const_v4f32(<4 x float>* %x) {
; CHECK-LABEL: gather_const_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 2
; CHECK-NEXT: vse32.v v26, (a0)
define void @gather_const_v2f64(<2 x double>* %x) {
; CHECK-LABEL: gather_const_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 0
; CHECK-NEXT: vse64.v v26, (a0)
; LMULMAX1-LABEL: gather_const_v64f16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: addi a2, zero, 8
-; LMULMAX1-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (a1)
; LMULMAX1-NEXT: addi a6, a0, 16
; LMULMAX1-NEXT: addi a7, a0, 48
; LMULMAX1-LABEL: gather_const_v32f32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 64
-; LMULMAX1-NEXT: addi a2, zero, 4
-; LMULMAX1-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vle32.v v25, (a1)
; LMULMAX1-NEXT: addi a6, a0, 16
; LMULMAX1-NEXT: addi a7, a0, 48
define void @gather_const_v16f64(<16 x double>* %x) {
; LMULMAX8-LABEL: gather_const_v16f64:
; LMULMAX8: # %bb.0:
-; LMULMAX8-NEXT: addi a1, zero, 16
-; LMULMAX8-NEXT: vsetvli a1, a1, e64,m8,ta,mu
+; LMULMAX8-NEXT: vsetivli a1, 16, e64,m8,ta,mu
; LMULMAX8-NEXT: vle64.v v8, (a0)
; LMULMAX8-NEXT: vrgather.vi v16, v8, 10
; LMULMAX8-NEXT: vse64.v v16, (a0)
; LMULMAX1-LABEL: gather_const_v16f64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 80
-; LMULMAX1-NEXT: addi a2, zero, 2
-; LMULMAX1-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a1)
; LMULMAX1-NEXT: addi a6, a0, 16
; LMULMAX1-NEXT: addi a7, a0, 48
define void @fadd_v8f16(<8 x half>* %x, <8 x half>* %y) {
; CHECK-LABEL: fadd_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfadd.vv v25, v25, v26
define void @fadd_v4f32(<4 x float>* %x, <4 x float>* %y) {
; CHECK-LABEL: fadd_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfadd.vv v25, v25, v26
define void @fadd_v2f64(<2 x double>* %x, <2 x double>* %y) {
; CHECK-LABEL: fadd_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfadd.vv v25, v25, v26
define void @fsub_v8f16(<8 x half>* %x, <8 x half>* %y) {
; CHECK-LABEL: fsub_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfsub.vv v25, v25, v26
define void @fsub_v4f32(<4 x float>* %x, <4 x float>* %y) {
; CHECK-LABEL: fsub_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfsub.vv v25, v25, v26
define void @fsub_v2f64(<2 x double>* %x, <2 x double>* %y) {
; CHECK-LABEL: fsub_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfsub.vv v25, v25, v26
define void @fmul_v8f16(<8 x half>* %x, <8 x half>* %y) {
; CHECK-LABEL: fmul_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfmul.vv v25, v25, v26
define void @fmul_v4f32(<4 x float>* %x, <4 x float>* %y) {
; CHECK-LABEL: fmul_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfmul.vv v25, v25, v26
define void @fmul_v2f64(<2 x double>* %x, <2 x double>* %y) {
; CHECK-LABEL: fmul_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfmul.vv v25, v25, v26
define void @fdiv_v8f16(<8 x half>* %x, <8 x half>* %y) {
; CHECK-LABEL: fdiv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfdiv.vv v25, v25, v26
define void @fdiv_v4f32(<4 x float>* %x, <4 x float>* %y) {
; CHECK-LABEL: fdiv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfdiv.vv v25, v25, v26
define void @fdiv_v2f64(<2 x double>* %x, <2 x double>* %y) {
; CHECK-LABEL: fdiv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfdiv.vv v25, v25, v26
define void @fneg_v8f16(<8 x half>* %x) {
; CHECK-LABEL: fneg_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfsgnjn.vv v25, v25, v25
; CHECK-NEXT: vse16.v v25, (a0)
define void @fneg_v4f32(<4 x float>* %x) {
; CHECK-LABEL: fneg_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfsgnjn.vv v25, v25, v25
; CHECK-NEXT: vse32.v v25, (a0)
define void @fneg_v2f64(<2 x double>* %x) {
; CHECK-LABEL: fneg_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfsgnjn.vv v25, v25, v25
; CHECK-NEXT: vse64.v v25, (a0)
define void @fabs_v8f16(<8 x half>* %x) {
; CHECK-LABEL: fabs_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfsgnjx.vv v25, v25, v25
; CHECK-NEXT: vse16.v v25, (a0)
define void @fabs_v4f32(<4 x float>* %x) {
; CHECK-LABEL: fabs_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfsgnjx.vv v25, v25, v25
; CHECK-NEXT: vse32.v v25, (a0)
define void @fabs_v2f64(<2 x double>* %x) {
; CHECK-LABEL: fabs_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfsgnjx.vv v25, v25, v25
; CHECK-NEXT: vse64.v v25, (a0)
define void @sqrt_v8f16(<8 x half>* %x) {
; CHECK-LABEL: sqrt_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfsqrt.v v25, v25
; CHECK-NEXT: vse16.v v25, (a0)
define void @sqrt_v4f32(<4 x float>* %x) {
; CHECK-LABEL: sqrt_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfsqrt.v v25, v25
; CHECK-NEXT: vse32.v v25, (a0)
define void @sqrt_v2f64(<2 x double>* %x) {
; CHECK-LABEL: sqrt_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfsqrt.v v25, v25
; CHECK-NEXT: vse64.v v25, (a0)
define void @fma_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) {
; CHECK-LABEL: fma_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vle16.v v27, (a2)
define void @fma_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) {
; CHECK-LABEL: fma_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a3, a3, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vle32.v v27, (a2)
define void @fma_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) {
; CHECK-LABEL: fma_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 2
-; CHECK-NEXT: vsetvli a3, a3, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vle64.v v27, (a2)
define void @fmsub_v8f16(<8 x half>* %x, <8 x half>* %y, <8 x half>* %z) {
; CHECK-LABEL: fmsub_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vle16.v v27, (a2)
define void @fnmsub_v4f32(<4 x float>* %x, <4 x float>* %y, <4 x float>* %z) {
; CHECK-LABEL: fnmsub_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 4
-; CHECK-NEXT: vsetvli a3, a3, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vle32.v v27, (a2)
define void @fnmadd_v2f64(<2 x double>* %x, <2 x double>* %y, <2 x double>* %z) {
; CHECK-LABEL: fnmadd_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 2
-; CHECK-NEXT: vsetvli a3, a3, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vle64.v v27, (a2)
define void @fadd_v16f16(<16 x half>* %x, <16 x half>* %y) {
; LMULMAX2-LABEL: fadd_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vfadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fadd_v16f16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fadd_v16f16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @fadd_v8f32(<8 x float>* %x, <8 x float>* %y) {
; LMULMAX2-LABEL: fadd_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vfadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fadd_v8f32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fadd_v8f32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @fadd_v4f64(<4 x double>* %x, <4 x double>* %y) {
; LMULMAX2-LABEL: fadd_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vfadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fadd_v4f64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fadd_v4f64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @fsub_v16f16(<16 x half>* %x, <16 x half>* %y) {
; LMULMAX2-LABEL: fsub_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vfsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fsub_v16f16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fsub_v16f16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @fsub_v8f32(<8 x float>* %x, <8 x float>* %y) {
; LMULMAX2-LABEL: fsub_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vfsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fsub_v8f32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fsub_v8f32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @fsub_v4f64(<4 x double>* %x, <4 x double>* %y) {
; LMULMAX2-LABEL: fsub_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vfsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fsub_v4f64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fsub_v4f64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @fmul_v16f16(<16 x half>* %x, <16 x half>* %y) {
; LMULMAX2-LABEL: fmul_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vfmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fmul_v16f16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fmul_v16f16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @fmul_v8f32(<8 x float>* %x, <8 x float>* %y) {
; LMULMAX2-LABEL: fmul_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vfmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fmul_v8f32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fmul_v8f32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @fmul_v4f64(<4 x double>* %x, <4 x double>* %y) {
; LMULMAX2-LABEL: fmul_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vfmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fmul_v4f64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fmul_v4f64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @fdiv_v16f16(<16 x half>* %x, <16 x half>* %y) {
; LMULMAX2-LABEL: fdiv_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vfdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fdiv_v16f16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fdiv_v16f16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @fdiv_v8f32(<8 x float>* %x, <8 x float>* %y) {
; LMULMAX2-LABEL: fdiv_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vfdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fdiv_v8f32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fdiv_v8f32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @fdiv_v4f64(<4 x double>* %x, <4 x double>* %y) {
; LMULMAX2-LABEL: fdiv_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vfdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: fdiv_v4f64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: fdiv_v4f64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @fneg_v16f16(<16 x half>* %x) {
; LMULMAX2-LABEL: fneg_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vfsgnjn.vv v26, v26, v26
; LMULMAX2-NEXT: vse16.v v26, (a0)
;
; LMULMAX1-LABEL: fneg_v16f16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle16.v v25, (a1)
; LMULMAX1-NEXT: vle16.v v26, (a0)
define void @fneg_v8f32(<8 x float>* %x) {
; LMULMAX2-LABEL: fneg_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vfsgnjn.vv v26, v26, v26
; LMULMAX2-NEXT: vse32.v v26, (a0)
;
; LMULMAX1-LABEL: fneg_v8f32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle32.v v25, (a1)
; LMULMAX1-NEXT: vle32.v v26, (a0)
define void @fneg_v4f64(<4 x double>* %x) {
; LMULMAX2-LABEL: fneg_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vfsgnjn.vv v26, v26, v26
; LMULMAX2-NEXT: vse64.v v26, (a0)
;
; LMULMAX1-LABEL: fneg_v4f64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vle64.v v25, (a1)
; LMULMAX1-NEXT: vle64.v v26, (a0)
define void @fma_v16f16(<16 x half>* %x, <16 x half>* %y, <16 x half>* %z) {
; LMULMAX2-LABEL: fma_v16f16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a3, zero, 16
-; LMULMAX2-NEXT: vsetvli a3, a3, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a3, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vle16.v v30, (a2)
;
; LMULMAX1-LABEL: fma_v16f16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a3, zero, 8
-; LMULMAX1-NEXT: vsetvli a3, a3, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a3, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (a0)
; LMULMAX1-NEXT: addi a3, a0, 16
; LMULMAX1-NEXT: vle16.v v26, (a3)
define void @fma_v8f32(<8 x float>* %x, <8 x float>* %y, <8 x float>* %z) {
; LMULMAX2-LABEL: fma_v8f32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a3, zero, 8
-; LMULMAX2-NEXT: vsetvli a3, a3, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vle32.v v30, (a2)
;
; LMULMAX1-LABEL: fma_v8f32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a3, zero, 4
-; LMULMAX1-NEXT: vsetvli a3, a3, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a3, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vle32.v v25, (a0)
; LMULMAX1-NEXT: addi a3, a0, 16
; LMULMAX1-NEXT: vle32.v v26, (a3)
define void @fma_v4f64(<4 x double>* %x, <4 x double>* %y, <4 x double>* %z) {
; LMULMAX2-LABEL: fma_v4f64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a3, zero, 4
-; LMULMAX2-NEXT: vsetvli a3, a3, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a3, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vle64.v v30, (a2)
;
; LMULMAX1-LABEL: fma_v4f64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a3, zero, 2
-; LMULMAX1-NEXT: vsetvli a3, a3, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a3, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a0)
; LMULMAX1-NEXT: addi a3, a0, 16
; LMULMAX1-NEXT: vle64.v v26, (a3)
define void @fadd_vf_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fadd_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fadd_vf_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fadd_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fadd_vf_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fadd_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fadd_fv_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fadd_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fadd_fv_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fadd_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fadd_fv_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fadd_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfadd.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fsub_vf_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fsub_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfsub.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fsub_vf_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fsub_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfsub.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fsub_vf_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fsub_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfsub.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fsub_fv_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fsub_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfrsub.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fsub_fv_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fsub_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfrsub.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fsub_fv_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fsub_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfrsub.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fmul_vf_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fmul_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fmul_vf_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fmul_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fmul_vf_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fmul_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fmul_fv_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fmul_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fmul_fv_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fmul_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fmul_fv_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fmul_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfmul.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fdiv_vf_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fdiv_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfdiv.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fdiv_vf_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fdiv_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfdiv.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fdiv_vf_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fdiv_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfdiv.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fdiv_fv_v8f16(<8 x half>* %x, half %y) {
; CHECK-LABEL: fdiv_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vfrdiv.vf v25, v25, fa0
; CHECK-NEXT: vse16.v v25, (a0)
define void @fdiv_fv_v4f32(<4 x float>* %x, float %y) {
; CHECK-LABEL: fdiv_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vfrdiv.vf v25, v25, fa0
; CHECK-NEXT: vse32.v v25, (a0)
define void @fdiv_fv_v2f64(<2 x double>* %x, double %y) {
; CHECK-LABEL: fdiv_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vfrdiv.vf v25, v25, fa0
; CHECK-NEXT: vse64.v v25, (a0)
define void @fma_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
; CHECK-LABEL: fma_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fma_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
; CHECK-LABEL: fma_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fma_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
; CHECK-LABEL: fma_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fma_fv_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
; CHECK-LABEL: fma_fv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fma_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
; CHECK-LABEL: fma_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fma_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
; CHECK-LABEL: fma_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfmacc.vf v26, fa0, v25
define void @fmsub_vf_v8f16(<8 x half>* %x, <8 x half>* %y, half %z) {
; CHECK-LABEL: fmsub_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vfmsac.vf v26, fa0, v25
define void @fnmsub_vf_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
; CHECK-LABEL: fnmsub_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfnmsac.vf v26, fa0, v25
define void @fnmadd_vf_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
; CHECK-LABEL: fnmadd_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfnmacc.vf v26, fa0, v25
define void @fnmsub_fv_v4f32(<4 x float>* %x, <4 x float>* %y, float %z) {
; CHECK-LABEL: fnmsub_fv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vfnmsac.vf v26, fa0, v25
define void @fnmadd_fv_v2f64(<2 x double>* %x, <2 x double>* %y, double %z) {
; CHECK-LABEL: fnmadd_fv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vfnmacc.vf v26, fa0, v25
; RV32-NEXT: andi sp, sp, -32
; RV32-NEXT: sw a2, 32(sp)
; RV32-NEXT: sw a1, 64(sp)
-; RV32-NEXT: addi a1, zero, 8
-; RV32-NEXT: vsetvli a2, a1, e32,m2,ta,mu
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (a0)
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a2, v26
-; RV32-NEXT: sw a2, 0(sp)
-; RV32-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; RV32-NEXT: addi a2, sp, 32
-; RV32-NEXT: vle32.v v28, (a2)
+; RV32-NEXT: vmv.x.s a1, v26
+; RV32-NEXT: sw a1, 0(sp)
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
+; RV32-NEXT: addi a1, sp, 32
+; RV32-NEXT: vle32.v v28, (a1)
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a2, v28
-; RV32-NEXT: sw a2, 28(sp)
-; RV32-NEXT: vsetvli a2, a1, e32,m2,ta,mu
-; RV32-NEXT: addi a2, sp, 64
-; RV32-NEXT: vle32.v v28, (a2)
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 28(sp)
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
+; RV32-NEXT: addi a1, sp, 64
+; RV32-NEXT: vle32.v v28, (a1)
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a2, v28
-; RV32-NEXT: sw a2, 24(sp)
-; RV32-NEXT: addi a2, zero, 1
-; RV32-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 24(sp)
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v28, v26, 5
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a3, v28
-; RV32-NEXT: sw a3, 20(sp)
-; RV32-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 20(sp)
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v28, v26, 4
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a3, v28
-; RV32-NEXT: sw a3, 16(sp)
-; RV32-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 16(sp)
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v28, v26, 3
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a3, v28
-; RV32-NEXT: sw a3, 12(sp)
-; RV32-NEXT: vsetvli a3, a2, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 12(sp)
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v28, v26, 2
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a3, v28
-; RV32-NEXT: sw a3, 8(sp)
-; RV32-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v28
+; RV32-NEXT: sw a1, 8(sp)
+; RV32-NEXT: vsetivli a1, 1, e32,m2,ta,mu
; RV32-NEXT: vslidedown.vi v26, v26, 1
; RV32-NEXT: vsetvli zero, zero, e32,m2,ta,mu
-; RV32-NEXT: vmv.x.s a2, v26
-; RV32-NEXT: sw a2, 4(sp)
-; RV32-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; RV32-NEXT: vmv.x.s a1, v26
+; RV32-NEXT: sw a1, 4(sp)
+; RV32-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; RV32-NEXT: vle32.v v26, (sp)
; RV32-NEXT: vse32.v v26, (a0)
; RV32-NEXT: addi sp, s0, -128
; RV64-NEXT: addi s0, sp, 96
; RV64-NEXT: .cfi_def_cfa s0, 0
; RV64-NEXT: andi sp, sp, -32
-; RV64-NEXT: addi a2, zero, 4
-; RV64-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (a0)
; RV64-NEXT: sd a1, 32(sp)
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vmv.x.s a1, v26
; RV64-NEXT: sd a1, 0(sp)
-; RV64-NEXT: vsetvli a1, a2, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; RV64-NEXT: addi a1, sp, 32
; RV64-NEXT: vle64.v v28, (a1)
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vmv.x.s a1, v28
; RV64-NEXT: sd a1, 24(sp)
-; RV64-NEXT: addi a1, zero, 1
-; RV64-NEXT: vsetvli a3, a1, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v28, v26, 2
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
-; RV64-NEXT: vmv.x.s a3, v28
-; RV64-NEXT: sd a3, 16(sp)
-; RV64-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; RV64-NEXT: vmv.x.s a1, v28
+; RV64-NEXT: sd a1, 16(sp)
+; RV64-NEXT: vsetivli a1, 1, e64,m2,ta,mu
; RV64-NEXT: vslidedown.vi v26, v26, 1
; RV64-NEXT: vsetvli zero, zero, e64,m2,ta,mu
; RV64-NEXT: vmv.x.s a1, v26
; RV64-NEXT: sd a1, 8(sp)
-; RV64-NEXT: vsetvli a1, a2, e64,m2,ta,mu
+; RV64-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; RV64-NEXT: vle64.v v26, (sp)
; RV64-NEXT: vse64.v v26, (a0)
; RV64-NEXT: addi sp, s0, -96
define void @buildvec_vid_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: buildvec_vid_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vid.v v25
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @buildvec_vid_undefelts_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: buildvec_vid_undefelts_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vid.v v25
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI2_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI2_0)
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a1)
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI3_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI3_0)
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a1)
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: lui a1, %hi(.LCPI4_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI4_0)
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a1)
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @seteq_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: seteq_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmseq.vv v0, v25, v26
define void @setge_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
; CHECK-LABEL: setge_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmsle.vv v27, v26, v25
define void @setle_vv_v16i8(<16 x i8>* %x, <16 x i8>* %y, <16 x i1>* %z) {
; CHECK-LABEL: setle_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmsle.vv v27, v25, v26
define void @setule_vv_v8i8(<8 x i8>* %x, <8 x i8>* %y, <8 x i1>* %z) {
; CHECK-LABEL: setule_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmsleu.vv v27, v25, v26
define void @seteq_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
; CHECK-LABEL: seteq_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmseq.vx v26, v25, a1
; CHECK-NEXT: vse1.v v26, (a2)
define void @setge_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
; CHECK-LABEL: setge_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: vmsle.vv v27, v26, v25
define void @setle_vx_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
; CHECK-LABEL: setle_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmsle.vx v26, v25, a1
; CHECK-NEXT: vse1.v v26, (a2)
define void @setule_vx_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
; CHECK-LABEL: setule_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmsleu.vx v26, v25, a1
; CHECK-NEXT: vse1.v v26, (a2)
define void @seteq_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
; CHECK-LABEL: seteq_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmseq.vx v26, v25, a1
; CHECK-NEXT: vse1.v v26, (a2)
define void @setge_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
; CHECK-LABEL: setge_xv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmsle.vx v26, v25, a1
; CHECK-NEXT: vse1.v v26, (a2)
define void @setle_xv_v16i8(<16 x i8>* %x, i8 %y, <16 x i1>* %z) {
; CHECK-LABEL: setle_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: vmsle.vv v27, v26, v25
define void @setule_xv_v8i8(<8 x i8>* %x, i8 %y, <8 x i1>* %z) {
; CHECK-LABEL: setule_xv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a3, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmv.v.x v26, a1
; CHECK-NEXT: vmsleu.vv v27, v26, v25
define void @seteq_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
; CHECK-LABEL: seteq_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmseq.vi v26, v25, 0
; CHECK-NEXT: vse1.v v26, (a1)
define void @setge_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
; CHECK-LABEL: setge_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmv.v.i v26, 0
; CHECK-NEXT: vmsle.vv v27, v26, v25
define void @setle_vi_v16i8(<16 x i8>* %x, <16 x i1>* %z) {
; CHECK-LABEL: setle_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmsle.vi v26, v25, 0
; CHECK-NEXT: vse1.v v26, (a1)
define void @setule_vi_v8i8(<8 x i8>* %x, <8 x i1>* %z) {
; CHECK-LABEL: setule_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmsleu.vi v26, v25, 5
; CHECK-NEXT: vse1.v v26, (a1)
define void @seteq_vv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: seteq_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmseq.vv v0, v25, v26
define void @setne_vv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: setne_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmsne.vv v0, v25, v26
define void @setgt_vv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: setgt_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmslt.vv v0, v26, v25
define void @setlt_vv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; CHECK-LABEL: setlt_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vle16.v v28, (a1)
; CHECK-NEXT: vmslt.vv v0, v26, v28
define void @setugt_vv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; CHECK-LABEL: setugt_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
; CHECK-NEXT: vmsltu.vv v0, v28, v26
define void @setult_vv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; CHECK-LABEL: setult_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; CHECK-NEXT: vle64.v v26, (a0)
; CHECK-NEXT: vle64.v v28, (a1)
; CHECK-NEXT: vmsltu.vv v0, v26, v28
define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: splat_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: splat_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: splat_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 16
-; LMULMAX1-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
; LMULMAX2-LABEL: splat_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 8
-; LMULMAX1-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
; LMULMAX2-LABEL: splat_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 4
-; LMULMAX1-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_zero_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: splat_zero_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: splat_zero_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: splat_zero_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: splat_zero_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 16
-; LMULMAX1-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_zero_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: splat_zero_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_zero_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: splat_zero_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_zero_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: splat_zero_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_allones_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: splat_allones_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: splat_allones_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: splat_allones_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: splat_allones_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 16
-; LMULMAX1-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_allones_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: splat_allones_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_allones_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: splat_allones_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_allones_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: splat_allones_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_allones_with_use_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: splat_allones_with_use_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v28, -1
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vadd.vv v26, v26, v28
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_with_use_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a0)
-; LMULMAX1-NEXT: addi a2, a0, 16
-; LMULMAX1-NEXT: vle64.v v26, (a2)
-; LMULMAX1-NEXT: addi a3, zero, 4
-; LMULMAX1-NEXT: vsetvli a3, a3, e32,m1,ta,mu
+; LMULMAX1-NEXT: addi a1, a0, 16
+; LMULMAX1-NEXT: vle64.v v26, (a1)
+; LMULMAX1-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v27, -1
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vadd.vv v26, v26, v27
; LMULMAX1-NEXT: vadd.vv v25, v25, v27
; LMULMAX1-NEXT: vse64.v v25, (a0)
-; LMULMAX1-NEXT: vse64.v v26, (a2)
+; LMULMAX1-NEXT: vse64.v v26, (a1)
; LMULMAX1-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = add <4 x i64> %a, <i64 -1, i64 -1, i64 -1, i64 -1>
define void @splat_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: splat_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: splat_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: splat_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_v2i64(<2 x i64>* %x, i64 %y) {
; CHECK-LABEL: splat_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 16
-; LMULMAX1-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_v16i16(<16 x i16>* %x, i16 %y) {
; LMULMAX2-LABEL: splat_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 8
-; LMULMAX1-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_v8i32(<8 x i32>* %x, i32 %y) {
; LMULMAX2-LABEL: splat_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 4
-; LMULMAX1-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_v4i64(<4 x i64>* %x, i64 %y) {
; LMULMAX2-LABEL: splat_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.x v26, a1
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a2, zero, 2
-; LMULMAX1-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.x v25, a1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
define void @splat_zero_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: splat_zero_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: splat_zero_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: splat_zero_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zero_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: splat_zero_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 16
-; LMULMAX1-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_zero_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: splat_zero_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_zero_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: splat_zero_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_zero_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: splat_zero_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, 0
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_zero_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, 0
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
define void @splat_allones_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: splat_allones_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: splat_allones_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse16.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: splat_allones_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse32.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_allones_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: splat_allones_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vse64.v v25, (a0)
; CHECK-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v32i8:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 16
-; LMULMAX1-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse8.v v25, (a1)
define void @splat_allones_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: splat_allones_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse16.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v16i16:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 8
-; LMULMAX1-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse16.v v25, (a1)
define void @splat_allones_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: splat_allones_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse32.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v8i32:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 4
-; LMULMAX1-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse32.v v25, (a1)
define void @splat_allones_v4i64(<4 x i64>* %x) {
; LMULMAX2-LABEL: splat_allones_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 4
-; LMULMAX2-NEXT: vsetvli a1, a1, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vmv.v.i v26, -1
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-LABEL: splat_allones_v4i64:
; LMULMAX1: # %bb.0:
-; LMULMAX1-NEXT: addi a1, zero, 2
-; LMULMAX1-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v25, -1
; LMULMAX1-NEXT: addi a1, a0, 16
; LMULMAX1-NEXT: vse64.v v25, (a1)
define void @gather_const_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: gather_const_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 12
; CHECK-NEXT: vse8.v v26, (a0)
define void @gather_const_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: gather_const_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 5
; CHECK-NEXT: vse16.v v26, (a0)
define void @gather_const_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: gather_const_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 3
; CHECK-NEXT: vse32.v v26, (a0)
define void @gather_const_v2i64(<2 x i64>* %x) {
; CHECK-LABEL: gather_const_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vrgather.vi v26, v25, 1
; CHECK-NEXT: vse64.v v26, (a0)
; LMULMAX1-LABEL: gather_const_v64i8:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: addi a2, zero, 16
-; LMULMAX1-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-NEXT: vle8.v v25, (a1)
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: addi a3, a0, 48
; LMULMAX1-LABEL: gather_const_v16i16:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 48
-; LMULMAX1-NEXT: addi a2, zero, 8
-; LMULMAX1-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-NEXT: vle16.v v25, (a1)
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: addi a3, a0, 32
define void @gather_const_v16i32(<16 x i32>* %x) {
; LMULMAX4-LABEL: gather_const_v16i32:
; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a1, zero, 16
-; LMULMAX4-NEXT: vsetvli a1, a1, e32,m4,ta,mu
+; LMULMAX4-NEXT: vsetivli a1, 16, e32,m4,ta,mu
; LMULMAX4-NEXT: vle32.v v28, (a0)
; LMULMAX4-NEXT: vrgather.vi v8, v28, 9
; LMULMAX4-NEXT: vse32.v v8, (a0)
; LMULMAX1-LABEL: gather_const_v16i32:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 32
-; LMULMAX1-NEXT: addi a2, zero, 4
-; LMULMAX1-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-NEXT: vle32.v v25, (a1)
; LMULMAX1-NEXT: addi a2, a0, 16
; LMULMAX1-NEXT: addi a3, a0, 48
define void @gather_const_v8i64(<8 x i64>* %x) {
; LMULMAX4-LABEL: gather_const_v8i64:
; LMULMAX4: # %bb.0:
-; LMULMAX4-NEXT: addi a1, zero, 8
-; LMULMAX4-NEXT: vsetvli a1, a1, e64,m4,ta,mu
+; LMULMAX4-NEXT: vsetivli a1, 8, e64,m4,ta,mu
; LMULMAX4-NEXT: vle64.v v28, (a0)
; LMULMAX4-NEXT: vrgather.vi v8, v28, 3
; LMULMAX4-NEXT: vse64.v v8, (a0)
; LMULMAX1-LABEL: gather_const_v8i64:
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: addi a1, a0, 16
-; LMULMAX1-NEXT: addi a2, zero, 2
-; LMULMAX1-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vle64.v v25, (a1)
; LMULMAX1-NEXT: addi a2, a0, 48
; LMULMAX1-NEXT: addi a3, a0, 32
define void @add_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: add_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vadd.vv v25, v25, v26
define void @add_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: add_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vadd.vv v25, v25, v26
define void @add_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: add_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vadd.vv v25, v25, v26
define void @add_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: add_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vadd.vv v25, v25, v26
define void @sub_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: sub_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vsub.vv v25, v25, v26
define void @sub_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: sub_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vsub.vv v25, v25, v26
define void @sub_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: sub_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vsub.vv v25, v25, v26
define void @sub_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: sub_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vsub.vv v25, v25, v26
define void @mul_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: mul_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmul.vv v25, v25, v26
define void @mul_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: mul_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmul.vv v25, v25, v26
define void @mul_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: mul_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmul.vv v25, v25, v26
define void @mul_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: mul_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmul.vv v25, v25, v26
define void @and_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: and_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vand.vv v25, v25, v26
define void @and_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: and_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vand.vv v25, v25, v26
define void @and_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: and_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vand.vv v25, v25, v26
define void @and_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: and_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vand.vv v25, v25, v26
define void @or_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: or_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vor.vv v25, v25, v26
define void @or_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: or_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vor.vv v25, v25, v26
define void @or_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: or_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vor.vv v25, v25, v26
define void @or_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: or_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vor.vv v25, v25, v26
define void @xor_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: xor_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vxor.vv v25, v25, v26
define void @xor_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: xor_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vxor.vv v25, v25, v26
define void @xor_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: xor_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vxor.vv v25, v25, v26
define void @xor_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: xor_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vxor.vv v25, v25, v26
define void @lshr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: lshr_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vsrl.vv v25, v25, v26
define void @lshr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: lshr_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vsrl.vv v25, v25, v26
define void @lshr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: lshr_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vsrl.vv v25, v25, v26
define void @lshr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: lshr_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vsrl.vv v25, v25, v26
define void @ashr_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: ashr_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vsra.vv v25, v25, v26
define void @ashr_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: ashr_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vsra.vv v25, v25, v26
define void @ashr_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: ashr_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vsra.vv v25, v25, v26
define void @ashr_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: ashr_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vsra.vv v25, v25, v26
define void @shl_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: shl_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vsll.vv v25, v25, v26
define void @shl_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: shl_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vsll.vv v25, v25, v26
define void @shl_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: shl_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vsll.vv v25, v25, v26
define void @shl_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: shl_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vsll.vv v25, v25, v26
define void @sdiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: sdiv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vdiv.vv v25, v25, v26
define void @sdiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: sdiv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vdiv.vv v25, v25, v26
define void @sdiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: sdiv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vdiv.vv v25, v25, v26
define void @sdiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: sdiv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vdiv.vv v25, v25, v26
define void @srem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: srem_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vrem.vv v25, v25, v26
define void @srem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: srem_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vrem.vv v25, v25, v26
define void @srem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: srem_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vrem.vv v25, v25, v26
define void @srem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: srem_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vrem.vv v25, v25, v26
define void @udiv_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: udiv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vdivu.vv v25, v25, v26
define void @udiv_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: udiv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vdivu.vv v25, v25, v26
define void @udiv_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: udiv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vdivu.vv v25, v25, v26
define void @udiv_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: udiv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vdivu.vv v25, v25, v26
define void @urem_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: urem_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vremu.vv v25, v25, v26
define void @urem_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: urem_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vremu.vv v25, v25, v26
define void @urem_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: urem_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vremu.vv v25, v25, v26
define void @urem_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: urem_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vremu.vv v25, v25, v26
define void @mulhu_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: mulhu_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI52_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI52_0)
define void @mulhu_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: mulhu_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI53_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI53_0)
define void @mulhu_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: mulhu_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI54_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI54_0)
define void @mulhu_v2i64(<2 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhu_v2i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI55_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI55_0)
-; LMULMAX1-RV32-NEXT: addi a3, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI55_0)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI55_0)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmulhu.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI55_1)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI55_1)
-; LMULMAX1-RV32-NEXT: vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI55_1)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI55_1)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vsrl.vv v25, v25, v26
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: mulhu_v2i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI55_0)
; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI55_0)
define void @mulhs_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: mulhs_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI56_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI56_0)
define void @mulhs_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: mulhs_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: lui a1, %hi(.LCPI57_0)
; CHECK-NEXT: addi a1, a1, %lo(.LCPI57_0)
define void @mulhs_v4i32(<4 x i32>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_v4i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI58_0)
; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI58_0)
;
; LMULMAX1-RV64-LABEL: mulhs_v4i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI58_0)
; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI58_0)
define void @mulhs_v2i64(<2 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_v2i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI59_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI59_0)
-; LMULMAX1-RV32-NEXT: addi a3, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI59_0)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI59_0)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmul.vv v26, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI59_1)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI59_1)
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v27, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI59_1)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI59_1)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v27, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmulh.vv v25, v25, v27
; LMULMAX1-RV32-NEXT: vadd.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI59_2)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI59_2)
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI59_2)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI59_2)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vsrl.vv v26, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI59_3)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI59_3)
-; LMULMAX1-RV32-NEXT: vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v27, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI59_3)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI59_3)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v27, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vsra.vv v25, v25, v27
; LMULMAX1-RV32-NEXT: vadd.vv v25, v25, v26
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
;
; LMULMAX1-RV64-LABEL: mulhs_v2i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, %hi(.LCPI59_0)
; LMULMAX1-RV64-NEXT: addi a1, a1, %lo(.LCPI59_0)
define void @smin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: smin_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmin.vv v25, v25, v26
define void @smin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: smin_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmin.vv v25, v25, v26
define void @smin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: smin_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmin.vv v25, v25, v26
define void @smin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: smin_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmin.vv v25, v25, v26
define void @smax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: smax_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmax.vv v25, v25, v26
define void @smax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: smax_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmax.vv v25, v25, v26
define void @smax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: smax_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmax.vv v25, v25, v26
define void @smax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: smax_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmax.vv v25, v25, v26
define void @umin_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: umin_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vminu.vv v25, v25, v26
define void @umin_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: umin_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vminu.vv v25, v25, v26
define void @umin_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: umin_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vminu.vv v25, v25, v26
define void @umin_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: umin_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vminu.vv v25, v25, v26
define void @umax_v16i8(<16 x i8>* %x, <16 x i8>* %y) {
; CHECK-LABEL: umax_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vle8.v v26, (a1)
; CHECK-NEXT: vmaxu.vv v25, v25, v26
define void @umax_v8i16(<8 x i16>* %x, <8 x i16>* %y) {
; CHECK-LABEL: umax_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vle16.v v26, (a1)
; CHECK-NEXT: vmaxu.vv v25, v25, v26
define void @umax_v4i32(<4 x i32>* %x, <4 x i32>* %y) {
; CHECK-LABEL: umax_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vle32.v v26, (a1)
; CHECK-NEXT: vmaxu.vv v25, v25, v26
define void @umax_v2i64(<2 x i64>* %x, <2 x i64>* %y) {
; CHECK-LABEL: umax_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; CHECK-NEXT: vle64.v v25, (a0)
; CHECK-NEXT: vle64.v v26, (a1)
; CHECK-NEXT: vmaxu.vv v25, v25, v26
;
; LMULMAX1-RV32-LABEL: add_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: add_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @add_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: add_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: add_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: add_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @add_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: add_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: add_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: add_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @add_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: add_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vadd.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: add_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: add_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: sub_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sub_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @sub_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: sub_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sub_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sub_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @sub_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: sub_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sub_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sub_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @sub_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: sub_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vsub.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sub_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sub_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: mul_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: mul_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @mul_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: mul_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: mul_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: mul_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @mul_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: mul_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: mul_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: mul_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @mul_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: mul_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vmul.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: mul_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: mul_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: and_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: and_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @and_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: and_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vand.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: and_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: and_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @and_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: and_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vand.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: and_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: and_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @and_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: and_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vand.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: and_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: and_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: or_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: or_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @or_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: or_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: or_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: or_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @or_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: or_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: or_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: or_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @or_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: or_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: or_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: or_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: xor_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: xor_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @xor_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: xor_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vxor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: xor_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: xor_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @xor_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: xor_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vxor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: xor_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: xor_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @xor_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: xor_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vxor.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: xor_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: xor_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: lshr_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: lshr_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @lshr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: lshr_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vsrl.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: lshr_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: lshr_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @lshr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: lshr_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vsrl.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: lshr_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: lshr_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @lshr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: lshr_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vsrl.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: lshr_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: lshr_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: ashr_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: ashr_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @ashr_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: ashr_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vsra.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: ashr_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: ashr_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @ashr_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: ashr_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vsra.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: ashr_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: ashr_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @ashr_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: ashr_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vsra.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: ashr_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: ashr_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: shl_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: shl_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @shl_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: shl_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vsll.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: shl_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: shl_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @shl_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: shl_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vsll.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: shl_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: shl_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @shl_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: shl_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vsll.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: shl_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: shl_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: sdiv_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sdiv_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @sdiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: sdiv_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sdiv_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sdiv_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @sdiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: sdiv_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sdiv_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sdiv_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @sdiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: sdiv_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vdiv.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: sdiv_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: sdiv_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: srem_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: srem_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @srem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: srem_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vrem.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: srem_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: srem_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @srem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: srem_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vrem.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: srem_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: srem_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @srem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: srem_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vrem.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: srem_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: srem_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: udiv_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: udiv_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @udiv_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: udiv_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vdivu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: udiv_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: udiv_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @udiv_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: udiv_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vdivu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: udiv_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: udiv_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @udiv_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: udiv_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vdivu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: udiv_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: udiv_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: urem_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: urem_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @urem_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: urem_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vremu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: urem_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: urem_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @urem_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: urem_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vremu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: urem_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: urem_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @urem_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: urem_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vremu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: urem_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: urem_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @extract_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: extract_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a3, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
-; LMULMAX2-NEXT: vsetvli a1, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vadd.vv v26, v26, v28
; LMULMAX2-NEXT: vse64.v v26, (a0)
; LMULMAX2-NEXT: ret
;
; LMULMAX1-RV32-LABEL: extract_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: addi a3, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v26, (a3)
+; LMULMAX1-RV32-NEXT: addi a2, a0, 16
+; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
; LMULMAX1-RV32-NEXT: vle64.v v27, (a1)
; LMULMAX1-RV32-NEXT: addi a1, a1, 16
; LMULMAX1-RV32-NEXT: vle64.v v28, (a1)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vadd.vv v26, v26, v28
; LMULMAX1-RV32-NEXT: vadd.vv v25, v25, v27
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v26, (a3)
+; LMULMAX1-RV32-NEXT: vse64.v v26, (a2)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: extract_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a3, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV64-NEXT: addi a3, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v26, (a3)
+; LMULMAX1-RV64-NEXT: addi a2, a0, 16
+; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
; LMULMAX1-RV64-NEXT: vle64.v v27, (a1)
; LMULMAX1-RV64-NEXT: addi a1, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v28, (a1)
-; LMULMAX1-RV64-NEXT: vsetvli a1, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v28
; LMULMAX1-RV64-NEXT: vadd.vv v25, v25, v27
; LMULMAX1-RV64-NEXT: vse64.v v25, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v26, (a3)
+; LMULMAX1-RV64-NEXT: vse64.v v26, (a2)
; LMULMAX1-RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = load <4 x i64>, <4 x i64>* %y
;
; LMULMAX1-RV32-LABEL: mulhu_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI129_0)
;
; LMULMAX1-RV64-LABEL: mulhu_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle8.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI129_0)
define void @mulhu_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: mulhu_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: lui a1, %hi(.LCPI130_0)
; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI130_0)
;
; LMULMAX1-RV32-LABEL: mulhu_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI130_0)
;
; LMULMAX1-RV64-LABEL: mulhu_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle16.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI130_0)
define void @mulhu_v8i32(<8 x i32>* %x) {
; LMULMAX2-LABEL: mulhu_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 8
-; LMULMAX2-NEXT: vsetvli a1, a1, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: lui a1, %hi(.LCPI131_0)
; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI131_0)
;
; LMULMAX1-RV32-LABEL: mulhu_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI131_0)
;
; LMULMAX1-RV64-LABEL: mulhu_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle32.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI131_0)
define void @mulhu_v4i64(<4 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhu_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
-; LMULMAX1-RV32-NEXT: lui a3, %hi(.LCPI132_0)
-; LMULMAX1-RV32-NEXT: addi a3, a3, %lo(.LCPI132_0)
-; LMULMAX1-RV32-NEXT: addi a4, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a5, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT: vsetvli a3, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: addi a1, a0, 16
+; LMULMAX1-RV32-NEXT: vle64.v v26, (a1)
+; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI132_0)
+; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI132_0)
+; LMULMAX1-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vdivu.vv v26, v26, v27
-; LMULMAX1-RV32-NEXT: lui a3, %hi(.LCPI132_1)
-; LMULMAX1-RV32-NEXT: addi a3, a3, %lo(.LCPI132_1)
-; LMULMAX1-RV32-NEXT: vsetvli a4, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI132_1)
+; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI132_1)
+; LMULMAX1-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vdivu.vv v25, v25, v27
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v26, (a2)
+; LMULMAX1-RV32-NEXT: vse64.v v26, (a1)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: mulhu_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle64.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI132_0)
;
; LMULMAX1-RV32-LABEL: mulhs_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI133_0)
;
; LMULMAX1-RV64-LABEL: mulhs_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle8.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI133_0)
define void @mulhs_v16i16(<16 x i16>* %x) {
; LMULMAX2-LABEL: mulhs_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a1, zero, 16
-; LMULMAX2-NEXT: vsetvli a1, a1, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: lui a1, %hi(.LCPI134_0)
; LMULMAX2-NEXT: addi a1, a1, %lo(.LCPI134_0)
;
; LMULMAX1-RV32-LABEL: mulhs_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI134_0)
;
; LMULMAX1-RV64-LABEL: mulhs_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle16.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI134_0)
define void @mulhs_v8i32(<8 x i32>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: addi a1, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v25, (a1)
; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI135_0)
;
; LMULMAX1-RV64-LABEL: mulhs_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle32.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI135_0)
define void @mulhs_v4i64(<4 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: addi a2, a0, 16
-; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
-; LMULMAX1-RV32-NEXT: lui a3, %hi(.LCPI136_0)
-; LMULMAX1-RV32-NEXT: addi a3, a3, %lo(.LCPI136_0)
-; LMULMAX1-RV32-NEXT: addi a4, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a4, a4, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v27, (a3)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: addi a1, a0, 16
+; LMULMAX1-RV32-NEXT: vle64.v v26, (a1)
+; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI136_0)
+; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI136_0)
+; LMULMAX1-RV32-NEXT: vsetivli a3, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v27, (a2)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vdiv.vv v26, v26, v27
; LMULMAX1-RV32-NEXT: vdiv.vv v25, v25, v27
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: vse64.v v26, (a2)
+; LMULMAX1-RV32-NEXT: vse64.v v26, (a1)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: mulhs_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
; LMULMAX1-RV64-NEXT: vle64.v v25, (a1)
; LMULMAX1-RV64-NEXT: lui a2, %hi(.LCPI136_0)
;
; LMULMAX1-RV32-LABEL: smin_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smin_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @smin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: smin_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vmin.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smin_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smin_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @smin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: smin_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vmin.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smin_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smin_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @smin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: smin_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vmin.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smin_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smin_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: smax_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smax_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @smax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: smax_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vmax.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smax_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smax_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @smax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: smax_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vmax.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smax_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smax_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @smax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: smax_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vmax.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: smax_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: smax_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: umin_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umin_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @umin_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: umin_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vminu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umin_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umin_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @umin_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: umin_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vminu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umin_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umin_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @umin_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: umin_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vminu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umin_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umin_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV32-LABEL: umax_v32i8:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle8.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umax_v32i8:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle8.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle8.v v26, (a2)
define void @umax_v16i16(<16 x i16>* %x, <16 x i16>* %y) {
; LMULMAX2-LABEL: umax_v16i16:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 16
-; LMULMAX2-NEXT: vsetvli a2, a2, e16,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 16, e16,m2,ta,mu
; LMULMAX2-NEXT: vle16.v v26, (a0)
; LMULMAX2-NEXT: vle16.v v28, (a1)
; LMULMAX2-NEXT: vmaxu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umax_v16i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle16.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umax_v16i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle16.v v26, (a2)
define void @umax_v8i32(<8 x i32>* %x, <8 x i32>* %y) {
; LMULMAX2-LABEL: umax_v8i32:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 8
-; LMULMAX2-NEXT: vsetvli a2, a2, e32,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 8, e32,m2,ta,mu
; LMULMAX2-NEXT: vle32.v v26, (a0)
; LMULMAX2-NEXT: vle32.v v28, (a1)
; LMULMAX2-NEXT: vmaxu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umax_v8i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umax_v8i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle32.v v26, (a2)
define void @umax_v4i64(<4 x i64>* %x, <4 x i64>* %y) {
; LMULMAX2-LABEL: umax_v4i64:
; LMULMAX2: # %bb.0:
-; LMULMAX2-NEXT: addi a2, zero, 4
-; LMULMAX2-NEXT: vsetvli a2, a2, e64,m2,ta,mu
+; LMULMAX2-NEXT: vsetivli a2, 4, e64,m2,ta,mu
; LMULMAX2-NEXT: vle64.v v26, (a0)
; LMULMAX2-NEXT: vle64.v v28, (a1)
; LMULMAX2-NEXT: vmaxu.vv v26, v26, v28
;
; LMULMAX1-RV32-LABEL: umax_v4i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a2, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV32-NEXT: addi a2, a0, 16
; LMULMAX1-RV32-NEXT: vle64.v v26, (a2)
;
; LMULMAX1-RV64-LABEL: umax_v4i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a2, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a2, a1, 16
; LMULMAX1-RV64-NEXT: vle64.v v26, (a2)
define void @add_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: add_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, -1
; CHECK-NEXT: vse8.v v25, (a0)
define void @add_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: add_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, -1
; CHECK-NEXT: vse16.v v25, (a0)
define void @add_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: add_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, -1
; CHECK-NEXT: vse32.v v25, (a0)
define void @add_iv_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: add_iv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, 1
; CHECK-NEXT: vse8.v v25, (a0)
define void @add_iv_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: add_iv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, 1
; CHECK-NEXT: vse16.v v25, (a0)
define void @add_iv_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: add_iv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vadd.vi v25, v25, 1
; CHECK-NEXT: vse32.v v25, (a0)
define void @add_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: add_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @add_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: add_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @add_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: add_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @add_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: add_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @add_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: add_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @add_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: add_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vadd.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @sub_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: sub_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: addi a1, zero, -1
; CHECK-NEXT: vsub.vx v25, v25, a1
define void @sub_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: sub_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: addi a1, zero, -1
; CHECK-NEXT: vsub.vx v25, v25, a1
define void @sub_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: sub_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: addi a1, zero, -1
; CHECK-NEXT: vsub.vx v25, v25, a1
define void @sub_iv_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: sub_iv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vrsub.vi v25, v25, 1
; CHECK-NEXT: vse8.v v25, (a0)
define void @sub_iv_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: sub_iv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vrsub.vi v25, v25, 1
; CHECK-NEXT: vse16.v v25, (a0)
define void @sub_iv_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: sub_iv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vrsub.vi v25, v25, 1
; CHECK-NEXT: vse32.v v25, (a0)
define void @sub_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: sub_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsub.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @sub_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: sub_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsub.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @sub_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: sub_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsub.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @sub_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: sub_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vrsub.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @sub_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: sub_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vrsub.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @sub_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: sub_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vrsub.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @mul_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: mul_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @mul_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: mul_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @mul_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: mul_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @mul_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: mul_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @mul_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: mul_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @mul_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: mul_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vmul.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @and_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: and_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, -2
; CHECK-NEXT: vse8.v v25, (a0)
define void @and_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: and_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, -2
; CHECK-NEXT: vse16.v v25, (a0)
define void @and_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: and_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, -2
; CHECK-NEXT: vse32.v v25, (a0)
define void @and_iv_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: and_iv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vse8.v v25, (a0)
define void @and_iv_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: and_iv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vse16.v v25, (a0)
define void @and_iv_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: and_iv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vse32.v v25, (a0)
define void @and_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: and_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @and_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: and_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @and_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: and_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @and_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: and_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @and_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: and_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @and_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: and_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vand.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @or_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: or_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, -2
; CHECK-NEXT: vse8.v v25, (a0)
define void @or_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: or_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, -2
; CHECK-NEXT: vse16.v v25, (a0)
define void @or_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: or_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, -2
; CHECK-NEXT: vse32.v v25, (a0)
define void @or_iv_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: or_iv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, 1
; CHECK-NEXT: vse8.v v25, (a0)
define void @or_iv_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: or_iv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, 1
; CHECK-NEXT: vse16.v v25, (a0)
define void @or_iv_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: or_iv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vor.vi v25, v25, 1
; CHECK-NEXT: vse32.v v25, (a0)
define void @or_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: or_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @or_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: or_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @or_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: or_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @or_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: or_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @or_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: or_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @or_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: or_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vor.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @xor_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: xor_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, -1
; CHECK-NEXT: vse8.v v25, (a0)
define void @xor_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: xor_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, -1
; CHECK-NEXT: vse16.v v25, (a0)
define void @xor_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: xor_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, -1
; CHECK-NEXT: vse32.v v25, (a0)
define void @xor_iv_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: xor_iv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, 1
; CHECK-NEXT: vse8.v v25, (a0)
define void @xor_iv_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: xor_iv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, 1
; CHECK-NEXT: vse16.v v25, (a0)
define void @xor_iv_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: xor_iv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vxor.vi v25, v25, 1
; CHECK-NEXT: vse32.v v25, (a0)
define void @xor_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: xor_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @xor_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: xor_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @xor_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: xor_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @xor_xv_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: xor_xv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @xor_xv_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: xor_xv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @xor_xv_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: xor_xv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vxor.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @lshr_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: lshr_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsrl.vi v25, v25, 7
; CHECK-NEXT: vse8.v v25, (a0)
define void @lshr_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: lshr_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsrl.vi v25, v25, 15
; CHECK-NEXT: vse16.v v25, (a0)
define void @lshr_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: lshr_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsrl.vi v25, v25, 31
; CHECK-NEXT: vse32.v v25, (a0)
define void @lshr_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: lshr_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @lshr_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: lshr_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @lshr_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: lshr_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @ashr_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: ashr_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsra.vi v25, v25, 7
; CHECK-NEXT: vse8.v v25, (a0)
define void @ashr_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: ashr_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsra.vi v25, v25, 15
; CHECK-NEXT: vse16.v v25, (a0)
define void @ashr_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: ashr_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsra.vi v25, v25, 31
; CHECK-NEXT: vse32.v v25, (a0)
define void @ashr_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: ashr_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsra.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @ashr_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: ashr_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsra.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @ashr_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: ashr_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsra.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @shl_vi_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: shl_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsll.vi v25, v25, 7
; CHECK-NEXT: vse8.v v25, (a0)
define void @shl_vi_v8i16(<8 x i16>* %x) {
; CHECK-LABEL: shl_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsll.vi v25, v25, 15
; CHECK-NEXT: vse16.v v25, (a0)
define void @shl_vi_v4i32(<4 x i32>* %x) {
; CHECK-LABEL: shl_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsll.vi v25, v25, 31
; CHECK-NEXT: vse32.v v25, (a0)
define void @shl_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: shl_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @shl_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: shl_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @shl_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: shl_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vsll.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @sdiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: sdiv_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vdiv.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @sdiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: sdiv_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vdiv.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @sdiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: sdiv_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vdiv.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @srem_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: srem_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vrem.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @srem_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: srem_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vrem.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @srem_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: srem_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vrem.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @udiv_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: udiv_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vdivu.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @udiv_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: udiv_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vdivu.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @udiv_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: udiv_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vdivu.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @urem_vx_v16i8(<16 x i8>* %x, i8 %y) {
; CHECK-LABEL: urem_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: vremu.vx v25, v25, a1
; CHECK-NEXT: vse8.v v25, (a0)
define void @urem_vx_v8i16(<8 x i16>* %x, i16 %y) {
; CHECK-LABEL: urem_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e16,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e16,m1,ta,mu
; CHECK-NEXT: vle16.v v25, (a0)
; CHECK-NEXT: vremu.vx v25, v25, a1
; CHECK-NEXT: vse16.v v25, (a0)
define void @urem_vx_v4i32(<4 x i32>* %x, i32 %y) {
; CHECK-LABEL: urem_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e32,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e32,m1,ta,mu
; CHECK-NEXT: vle32.v v25, (a0)
; CHECK-NEXT: vremu.vx v25, v25, a1
; CHECK-NEXT: vse32.v v25, (a0)
define void @mulhu_vx_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: mulhu_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: addi a1, zero, 57
; CHECK-NEXT: vmulhu.vx v25, v25, a1
define void @mulhu_vx_v8i16(<8 x i16>* %x) {
; LMULMAX1-RV32-LABEL: mulhu_vx_v8i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: lui a1, 2
; LMULMAX1-RV32-NEXT: addi a1, a1, 1171
;
; LMULMAX1-RV64-LABEL: mulhu_vx_v8i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 2
; LMULMAX1-RV64-NEXT: addiw a1, a1, 1171
define void @mulhu_vx_v4i32(<4 x i32>* %x) {
; LMULMAX1-RV32-LABEL: mulhu_vx_v4i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: lui a1, 838861
; LMULMAX1-RV32-NEXT: addi a1, a1, -819
;
; LMULMAX1-RV64-LABEL: mulhu_vx_v4i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 838861
; LMULMAX1-RV64-NEXT: addiw a1, a1, -819
define void @mulhu_vx_v2i64(<2 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhu_vx_v2i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI252_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI252_0)
-; LMULMAX1-RV32-NEXT: addi a3, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI252_0)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI252_0)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmulhu.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI252_1)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI252_1)
-; LMULMAX1-RV32-NEXT: vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI252_1)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI252_1)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vsrl.vv v25, v25, v26
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
; LMULMAX1-RV32-NEXT: ret
;
; LMULMAX1-RV64-LABEL: mulhu_vx_v2i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 1026731
; LMULMAX1-RV64-NEXT: addiw a1, a1, -1365
define void @mulhs_vx_v16i8(<16 x i8>* %x) {
; CHECK-LABEL: mulhs_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle8.v v25, (a0)
; CHECK-NEXT: addi a1, zero, -123
; CHECK-NEXT: vmulhu.vx v25, v25, a1
define void @mulhs_vx_v8i16(<8 x i16>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_vx_v8i16:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 8
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV32-NEXT: lui a1, 5
; LMULMAX1-RV32-NEXT: addi a1, a1, -1755
;
; LMULMAX1-RV64-LABEL: mulhs_vx_v8i16:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 8
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e16,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 8, e16,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle16.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 5
; LMULMAX1-RV64-NEXT: addiw a1, a1, -1755
define void @mulhs_vx_v4i32(<4 x i32>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_vx_v4i32:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV32-NEXT: lui a1, 629146
; LMULMAX1-RV32-NEXT: addi a1, a1, -1639
;
; LMULMAX1-RV64-LABEL: mulhs_vx_v4i32:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 4
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e32,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 4, e32,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle32.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 629146
; LMULMAX1-RV64-NEXT: addiw a1, a1, -1639
define void @mulhs_vx_v2i64(<2 x i64>* %x) {
; LMULMAX1-RV32-LABEL: mulhs_vx_v2i64:
; LMULMAX1-RV32: # %bb.0:
-; LMULMAX1-RV32-NEXT: addi a1, zero, 2
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vle64.v v25, (a0)
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI256_0)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI256_0)
-; LMULMAX1-RV32-NEXT: addi a3, zero, 4
-; LMULMAX1-RV32-NEXT: vsetvli a4, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a2, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI256_0)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI256_0)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmulh.vv v25, v25, v26
-; LMULMAX1-RV32-NEXT: lui a2, %hi(.LCPI256_1)
-; LMULMAX1-RV32-NEXT: addi a2, a2, %lo(.LCPI256_1)
-; LMULMAX1-RV32-NEXT: vsetvli a3, a3, e32,m1,ta,mu
-; LMULMAX1-RV32-NEXT: vle32.v v26, (a2)
-; LMULMAX1-RV32-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV32-NEXT: lui a1, %hi(.LCPI256_1)
+; LMULMAX1-RV32-NEXT: addi a1, a1, %lo(.LCPI256_1)
+; LMULMAX1-RV32-NEXT: vsetivli a2, 4, e32,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vle32.v v26, (a1)
+; LMULMAX1-RV32-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV32-NEXT: vsrl.vv v26, v25, v26
; LMULMAX1-RV32-NEXT: vadd.vv v25, v25, v26
; LMULMAX1-RV32-NEXT: vse64.v v25, (a0)
;
; LMULMAX1-RV64-LABEL: mulhs_vx_v2i64:
; LMULMAX1-RV64: # %bb.0:
-; LMULMAX1-RV64-NEXT: addi a1, zero, 2
-; LMULMAX1-RV64-NEXT: vsetvli a1, a1, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a1, 2, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: lui a1, 21845
; LMULMAX1-RV64-NEXT: addiw a1, a1, 1365
define void @load_store_v1i1(<1 x i1>* %x, <1 x i1>* %y) {
; CHECK-LABEL: load_store_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @load_store_v2i1(<2 x i1>* %x, <2 x i1>* %y) {
; CHECK-LABEL: load_store_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 2
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 2, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @load_store_v4i1(<4 x i1>* %x, <4 x i1>* %y) {
; CHECK-LABEL: load_store_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 4
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 4, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @load_store_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: load_store_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @load_store_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: load_store_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vse1.v v25, (a1)
; CHECK-NEXT: ret
define void @and_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: and_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmand.mm v25, v25, v26
define void @or_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: or_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmor.mm v25, v25, v26
define void @andnot_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: andnot_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmandnot.mm v25, v25, v26
define void @ornot_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: ornot_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmornot.mm v25, v25, v26
define void @nand_v8i1(<8 x i1>* %x, <8 x i1>* %y) {
; CHECK-LABEL: nand_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 8
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmnand.mm v25, v25, v26
define void @nor_v16i1(<16 x i1>* %x, <16 x i1>* %y) {
; CHECK-LABEL: nor_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a2, zero, 16
-; CHECK-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v25, (a0)
; CHECK-NEXT: vle1.v v26, (a1)
; CHECK-NEXT: vmnor.mm v25, v25, v26
define void @splat_ones_v1i1(<1 x i1>* %x) {
; CHECK-LABEL: splat_ones_v1i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 1, e8,m1,ta,mu
; CHECK-NEXT: vmset.m v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zeros_v2i1(<2 x i1>* %x) {
; CHECK-LABEL: splat_zeros_v2i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 2
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 2, e8,m1,ta,mu
; CHECK-NEXT: vmclr.m v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_ones_v4i1(<4 x i1>* %x) {
; CHECK-LABEL: splat_ones_v4i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 4
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 4, e8,m1,ta,mu
; CHECK-NEXT: vmset.m v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_zeros_v8i1(<8 x i1>* %x) {
; CHECK-LABEL: splat_zeros_v8i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 8
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e8,m1,ta,mu
; CHECK-NEXT: vmclr.m v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
define void @splat_ones_v16i1(<16 x i1>* %x) {
; CHECK-LABEL: splat_ones_v16i1:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a1, zero, 16
-; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vmset.m v25
; CHECK-NEXT: vse1.v v25, (a0)
; CHECK-NEXT: ret
; LMULMAX1-RV32-LABEL: splat_zeros_v32i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: addi a1, a0, 2
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmclr.m v25
; LMULMAX1-RV32-NEXT: vse1.v v25, (a1)
; LMULMAX1-RV32-NEXT: vse1.v v25, (a0)
; LMULMAX1-RV64-LABEL: splat_zeros_v32i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: addi a1, a0, 2
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmclr.m v25
; LMULMAX1-RV64-NEXT: vse1.v v25, (a1)
; LMULMAX1-RV64-NEXT: vse1.v v25, (a0)
; LMULMAX1-RV32-LABEL: splat_ones_v64i1:
; LMULMAX1-RV32: # %bb.0:
; LMULMAX1-RV32-NEXT: addi a1, a0, 6
-; LMULMAX1-RV32-NEXT: addi a2, zero, 16
-; LMULMAX1-RV32-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV32-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV32-NEXT: vmset.m v25
; LMULMAX1-RV32-NEXT: vse1.v v25, (a1)
; LMULMAX1-RV32-NEXT: addi a1, a0, 4
; LMULMAX1-RV64-LABEL: splat_ones_v64i1:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: addi a1, a0, 6
-; LMULMAX1-RV64-NEXT: addi a2, zero, 16
-; LMULMAX1-RV64-NEXT: vsetvli a2, a2, e8,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vsetivli a2, 16, e8,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmset.m v25
; LMULMAX1-RV64-NEXT: vse1.v v25, (a1)
; LMULMAX1-RV64-NEXT: addi a1, a0, 4
define void @vselect_vv_v8i32(<8 x i32>* %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a4, zero, 8
-; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a4, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
-; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
-; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
define void @vselect_vx_v8i32(i32 %a, <8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a4, zero, 8
-; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a4, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a1)
-; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
-; CHECK-NEXT: vsetvli a1, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
define void @vselect_vi_v8i32(<8 x i32>* %b, <8 x i1>* %cc, <8 x i32>* %z) {
; CHECK-LABEL: vselect_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
-; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, -1, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
define void @vselect_vv_v8f32(<8 x float>* %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a4, zero, 8
-; CHECK-NEXT: vsetvli a5, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a4, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
; CHECK-NEXT: vle32.v v28, (a1)
-; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
-; CHECK-NEXT: vsetvli a0, a4, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse32.v v26, (a3)
; CHECK-NEXT: ret
define void @vselect_vx_v8f32(float %a, <8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vx_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
-; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vfmerge.vfm v26, v26, fa0, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
define void @vselect_vfpzero_v8f32(<8 x float>* %b, <8 x i1>* %cc, <8 x float>* %z) {
; CHECK-LABEL: vselect_vfpzero_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 8
-; CHECK-NEXT: vsetvli a4, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 8, e32,m2,ta,mu
; CHECK-NEXT: vle32.v v26, (a0)
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
-; CHECK-NEXT: vsetvli a0, a3, e32,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 8, e32,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, 0, v0
; CHECK-NEXT: vse32.v v26, (a2)
; CHECK-NEXT: ret
define void @vselect_vv_v16i16(<16 x i16>* %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a4, zero, 16
-; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a4, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
; CHECK-NEXT: vle16.v v28, (a1)
-; CHECK-NEXT: vsetvli a0, a4, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
-; CHECK-NEXT: vsetvli a0, a4, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vvm v26, v28, v26, v0
; CHECK-NEXT: vse16.v v26, (a3)
; CHECK-NEXT: ret
define void @vselect_vx_v16i16(i16 signext %a, <16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a4, zero, 16
-; CHECK-NEXT: vsetvli a5, a4, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a4, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a1)
-; CHECK-NEXT: vsetvli a1, a4, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a2)
-; CHECK-NEXT: vsetvli a1, a4, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a1, 16, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vxm v26, v26, a0, v0
; CHECK-NEXT: vse16.v v26, (a3)
; CHECK-NEXT: ret
define void @vselect_vi_v16i16(<16 x i16>* %b, <16 x i1>* %cc, <16 x i16>* %z) {
; CHECK-LABEL: vselect_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: addi a3, zero, 16
-; CHECK-NEXT: vsetvli a4, a3, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a3, 16, e16,m2,ta,mu
; CHECK-NEXT: vle16.v v26, (a0)
-; CHECK-NEXT: vsetvli a0, a3, e8,m1,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e8,m1,ta,mu
; CHECK-NEXT: vle1.v v0, (a1)
-; CHECK-NEXT: vsetvli a0, a3, e16,m2,ta,mu
+; CHECK-NEXT: vsetivli a0, 16, e16,m2,ta,mu
; CHECK-NEXT: vmerge.vim v26, v26, 4, v0
; CHECK-NEXT: vse16.v v26, (a2)
; CHECK-NEXT: ret
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vredsum.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vredmaxu.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vor.vv v25, v26, v25
; CHECK-NEXT: vredmax.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vredminu.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vor.vv v25, v25, v26
; CHECK-NEXT: vredmin.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vmv.v.i v25, -1
; CHECK-NEXT: vredand.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vredor.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vredxor.vs v25, v8, v25
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredsum.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmaxu.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmax.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vredminu.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmin.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vredand.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredor.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredxor.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredsum.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmaxu.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmax.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vredminu.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredmin.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a2, zero, 1
-; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: vredand.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredor.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vredxor.vs v25, v8, v25
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a0, v25
-; CHECK-NEXT: addi a1, zero, 1
-; CHECK-NEXT: addi a2, zero, 32
-; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu
-; CHECK-NEXT: vsrl.vx v25, v25, a2
+; CHECK-NEXT: addi a1, zero, 32
+; CHECK-NEXT: vsetivli a2, 1, e64,m1,ta,mu
+; CHECK-NEXT: vsrl.vx v25, v25, a1
; CHECK-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; CHECK-NEXT: vmv.x.s a1, v25
; CHECK-NEXT: ret
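+; Note: the segment load/store tests below pass a VL of zero, previously
+; materialized with "mv aN, zero"; that zero constant now folds directly
+; into the vsetivli immediate.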
define <vscale x 16 x i16> @test_vlseg2_mask_nxv16i16(i16* %base, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vlseg2e16.v v4, (a0)
; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,tu,mu
; CHECK-NEXT: vlseg2e16.v v4, (a0), v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
define <vscale x 16 x i16> @test_vlsseg2_mask_nxv16i16(i16* %base, i64 %offset, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vlsseg2_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a2, zero
-; CHECK-NEXT: vsetvli a3, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,ta,mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1
; CHECK-NEXT: vmv4r.v v8, v4
-; CHECK-NEXT: vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,tu,mu
; CHECK-NEXT: vlsseg2e16.v v4, (a0), a1, v0.t
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v4m4_v8m4
; CHECK-NEXT: ret
define <vscale x 16 x i16> @test_vloxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vloxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,tu,mu
; CHECK-NEXT: vloxseg2ei16.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
define <vscale x 16 x i16> @test_vluxseg2_mask_nxv16i16_nxv16i16(i16* %base, <vscale x 16 x i16> %index, <vscale x 16 x i1> %mask) {
; CHECK-LABEL: test_vluxseg2_mask_nxv16i16_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a2, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8
; CHECK-NEXT: vmv4r.v v16, v12
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,tu,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,tu,mu
; CHECK-NEXT: vluxseg2ei16.v v12, (a0), v8, v0.t
; CHECK-NEXT: vmv4r.v v8, v16
; CHECK-NEXT: ret
define <vscale x 16 x i16> @test_vlseg2ff_nxv16i16(i16* %base, i64* %outvl) {
; CHECK-LABEL: test_vlseg2ff_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: mv a2, zero
-; CHECK-NEXT: vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,ta,mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0)
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
; CHECK-LABEL: test_vlseg2ff_mask_nxv16i16:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vmv4r.v v4, v8
-; CHECK-NEXT: mv a2, zero
-; CHECK-NEXT: vsetvli a2, a2, e16,m4,tu,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,tu,mu
; CHECK-NEXT: vlseg2e16ff.v v4, (a0), v0.t
; CHECK-NEXT: csrr a0, vl
; CHECK-NEXT: sd a0, 0(a1)
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsseg2e16.v v8, (a0)
; CHECK-NEXT: ret
entry:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsseg2e16.v v8, (a0), v0.t
; CHECK-NEXT: ret
entry:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a2, zero
-; CHECK-NEXT: vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,ta,mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1
; CHECK-NEXT: ret
entry:
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a2, zero
-; CHECK-NEXT: vsetvli a2, a2, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a2, 0, e16,m4,ta,mu
; CHECK-NEXT: vssseg2e16.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
entry:
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v28, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v28
; CHECK-NEXT: ret
entry:
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v28, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsoxseg2ei16.v v8, (a0), v28, v0.t
; CHECK-NEXT: ret
entry:
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v28, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28
; CHECK-NEXT: ret
entry:
; CHECK-NEXT: # kill: def $v8m4 killed $v8m4 killed $v8m4_v12m4 def $v8m4_v12m4
; CHECK-NEXT: vmv4r.v v28, v12
; CHECK-NEXT: vmv4r.v v12, v8
-; CHECK-NEXT: mv a1, zero
-; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetivli a1, 0, e16,m4,ta,mu
; CHECK-NEXT: vsuxseg2ei16.v v8, (a0), v28, v0.t
; CHECK-NEXT: ret
entry: