const MachineRegisterInfo *MRI) {
VSETVLIInfo InstrInfo;
- // If the instruction has policy argument, use the argument.
- // If there is no policy argument, default to tail agnostic unless the
- // destination is tied to a source. Unless the source is undef. In that case
- // the user would have some control over the policy values.
- bool TailAgnostic = true;
- bool MaskAgnostic = true;
+ bool TailAgnostic, MaskAgnostic;
unsigned UseOpIdx;
- if (RISCVII::hasVecPolicyOp(TSFlags)) {
- const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
- uint64_t Policy = Op.getImm();
- assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
- "Invalid Policy Value");
- // Although in some cases, mismatched passthru/maskedoff with policy value
- // does not make sense (ex. tied operand is IMPLICIT_DEF with non-TAMA
- // policy, or tied operand is not IMPLICIT_DEF with TAMA policy), but users
- // have set the policy value explicitly, so compiler would not fix it.
- TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
- MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
- } else if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
+ if (MI.isRegTiedToUseOperand(0, &UseOpIdx)) {
+ // Start with undisturbed.
TailAgnostic = false;
MaskAgnostic = false;
- // If the tied operand is an IMPLICIT_DEF we can keep TailAgnostic.
+
+ // If there is a policy operand, use it.
+ if (RISCVII::hasVecPolicyOp(TSFlags)) {
+ const MachineOperand &Op = MI.getOperand(MI.getNumExplicitOperands() - 1);
+ uint64_t Policy = Op.getImm();
+ assert(Policy <= (RISCVII::TAIL_AGNOSTIC | RISCVII::MASK_AGNOSTIC) &&
+ "Invalid Policy Value");
+ TailAgnostic = Policy & RISCVII::TAIL_AGNOSTIC;
+ MaskAgnostic = Policy & RISCVII::MASK_AGNOSTIC;
+ }
+
+ // If the tied operand is an IMPLICIT_DEF we can use TailAgnostic and
+ // MaskAgnostic.
const MachineOperand &UseMO = MI.getOperand(UseOpIdx);
MachineInstr *UseMI = MRI->getVRegDef(UseMO.getReg());
     if (UseMI && UseMI->isImplicitDef()) {
       TailAgnostic = true;
       MaskAgnostic = true;
     }
     // Some pseudo instructions force a tail agnostic policy despite having a
     // tied def.
     if (RISCVII::doesForceTailAgnostic(TSFlags))
       TailAgnostic = true;
- }
- if (!RISCVII::usesMaskPolicy(TSFlags))
+ if (!RISCVII::usesMaskPolicy(TSFlags))
+ MaskAgnostic = true;
+ } else {
+    // If there is no tied operand, there shouldn't be a policy operand.
+ assert(!RISCVII::hasVecPolicyOp(TSFlags) && "Unexpected policy operand");
+    // No tied operand; use agnostic policies.
+ TailAgnostic = true;
MaskAgnostic = true;
+ }
RISCVII::VLMUL VLMul = RISCVII::getLMul(TSFlags);
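The restructured hunk above can be read as a small decision tree: no tied passthru means both policies may be agnostic; a tied passthru starts undisturbed, is overridden by an explicit policy immediate if present, and is relaxed back to agnostic when the passthru is IMPLICIT_DEF, when the pseudo forces tail agnostic, or when it does not use mask policy. The standalone sketch below mirrors that logic for illustration only; the function name, parameters, and the bit values of the policy flags are assumptions (bit 0 for tail, bit 1 for mask, consistent with the assert in the hunk), not the LLVM API.

```cpp
#include <cstdint>
#include <optional>
#include <utility>

// Assumed encoding of the policy immediate (matches the TAIL_AGNOSTIC |
// MASK_AGNOSTIC upper bound asserted in the hunk above).
constexpr uint64_t kTailAgnostic = 1;
constexpr uint64_t kMaskAgnostic = 2;

// Hypothetical mirror of the restructured logic: returns {TailAgnostic,
// MaskAgnostic} for one instruction.
std::pair<bool, bool> computePolicy(bool HasTiedPassthru,
                                    bool PassthruIsImplicitDef,
                                    std::optional<uint64_t> PolicyOp,
                                    bool ForceTailAgnostic,
                                    bool UsesMaskPolicy) {
  if (!HasTiedPassthru) {
    // No passthru values to preserve, so both policies can be agnostic.
    return {true, true};
  }
  // Start with undisturbed, then let an explicit policy operand override.
  bool TA = false, MA = false;
  if (PolicyOp) {
    TA = (*PolicyOp & kTailAgnostic) != 0;
    MA = (*PolicyOp & kMaskAgnostic) != 0;
  }
  // An IMPLICIT_DEF passthru carries no live values; agnostic is safe.
  if (PassthruIsImplicitDef)
    TA = MA = true;
  // Some pseudos force tail agnostic; some never use mask policy.
  if (ForceTailAgnostic)
    TA = true;
  if (!UsesMaskPolicy)
    MA = true;
  return {TA, MA};
}
```

That relaxation toward agnostic is what drives the test churn below: many regenerated checks move from `tu, mu` to `ta, ma` once the passthru is known to be undef or absent.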
; CHECK-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; CHECK-NEXT: vmclr.m v0
; CHECK-NEXT: li s0, 36
-; CHECK-NEXT: vsetvli zero, s0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: vfwadd.vv v8, v8, v8, v0.t
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; CHECK-NEXT: call func@plt
; CHECK-NEXT: li a0, 32
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrgather.vv v4, v8, v8, v0.t
; CHECK-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; CHECK-NEXT: csrr a1, vlenb
; SUBREGLIVENESS-NEXT: vsetvli a0, zero, e8, m2, ta, ma
; SUBREGLIVENESS-NEXT: vmclr.m v0
; SUBREGLIVENESS-NEXT: li s0, 36
-; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, tu, mu
+; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vfwadd.vv v8, v8, v8, v0.t
; SUBREGLIVENESS-NEXT: addi a0, sp, 16
; SUBREGLIVENESS-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
; SUBREGLIVENESS-NEXT: call func@plt
; SUBREGLIVENESS-NEXT: li a0, 32
-; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, tu, mu
+; SUBREGLIVENESS-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: vrgather.vv v16, v8, v8, v0.t
; SUBREGLIVENESS-NEXT: vsetvli zero, s0, e16, m4, ta, ma
; SUBREGLIVENESS-NEXT: csrr a1, vlenb
define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i1_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i1_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_v2i1_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vslide1up.vx v9, v8, a1
; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslideup.vi v8, v10, 0
; RV32-NEXT: ret
;
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vslide1up.vx v9, v8, a1
; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslideup.vi v8, v10, 0
; RV32-NEXT: ret
;
; RV32-NEXT: vmv.v.i v8, 0
; RV32-NEXT: vslide1up.vx v9, v8, a1
; RV32-NEXT: vslide1up.vx v10, v9, a0
-; RV32-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vslideup.vi v8, v10, 0
; RV32-NEXT: ret
;
; RV32-FP-NEXT: vmv.v.i v8, 0
; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT: vslideup.vi v8, v10, 0
; RV32-FP-NEXT: ret
;
; RV32-FP-NEXT: vmv.v.i v8, 0
; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT: vslideup.vi v8, v10, 0
; RV32-FP-NEXT: ret
;
; RV32-FP-NEXT: vmv.v.i v8, 0
; RV32-FP-NEXT: vslide1up.vx v9, v8, a1
; RV32-FP-NEXT: vslide1up.vx v10, v9, a0
-; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, tu, ma
+; RV32-FP-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-FP-NEXT: vslideup.vi v8, v10, 0
; RV32-FP-NEXT: ret
;
define <4 x half> @slideup_v4f16(<4 x half> %x) {
; CHECK-LABEL: slideup_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <8 x float> @slideup_v8f32(<8 x float> %x) {
; CHECK-LABEL: slideup_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vslideup.vi v10, v8, 3
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%s = shufflevector <8 x float> %x, <8 x float> poison, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 4>
ret <8 x float> %s
; LMULMAX2: # %bb.0:
; LMULMAX2-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX2-NEXT: vle32.v v8, (a1)
-; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, tu, ma
+; LMULMAX2-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; LMULMAX2-NEXT: vslideup.vi v10, v8, 6
; LMULMAX2-NEXT: vse32.v v10, (a0)
; LMULMAX2-NEXT: ret
; LMULMAX1: # %bb.0:
; LMULMAX1-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; LMULMAX1-NEXT: vle32.v v8, (a1)
-; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, tu, ma
+; LMULMAX1-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; LMULMAX1-NEXT: vslideup.vi v9, v8, 2
; LMULMAX1-NEXT: addi a0, a0, 16
; LMULMAX1-NEXT: vse32.v v9, (a0)
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0)
-; CHECK-NEXT: vsetivli zero, 4, e64, m8, tu, ma
+; CHECK-NEXT: vsetivli zero, 4, e64, m8, ta, ma
; CHECK-NEXT: vslideup.vi v16, v8, 2
; CHECK-NEXT: vs8r.v v16, (a1)
; CHECK-NEXT: ret
define <4 x i16> @slideup_v4i16(<4 x i16> %x) {
; CHECK-LABEL: slideup_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, tu, ma
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vslideup.vi v9, v8, 1
; CHECK-NEXT: vmv1r.v v8, v9
; CHECK-NEXT: ret
define <8 x i32> @slideup_v8i32(<8 x i32> %x) {
; CHECK-LABEL: slideup_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, tu, ma
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vslideup.vi v10, v8, 3
-; CHECK-NEXT: vmv2r.v v8, v10
+; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
%s = shufflevector <8 x i32> %x, <8 x i32> poison, <8 x i32> <i32 undef, i32 undef, i32 undef, i32 undef, i32 1, i32 2, i32 3, i32 4>
ret <8 x i32> %s
define void @masked_load_v1f16(<1 x half>* %a, <1 x half>* %m_ptr, <1 x half>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v1f32(<1 x float>* %a, <1 x float>* %m_ptr, <1 x float>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v1f64(<1 x double>* %a, <1 x double>* %m_ptr, <1 x double>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v1f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV32-NEXT: vmfeq.vf v0, v8, ft0
;
; RV64-LABEL: masked_load_v1f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: fmv.d.x ft0, zero
; RV64-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v2f16(<2 x half>* %a, <2 x half>* %m_ptr, <2 x half>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v2f32(<2 x float>* %a, <2 x float>* %m_ptr, <2 x float>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v2f64(<2 x double>* %a, <2 x double>* %m_ptr, <2 x double>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV32-NEXT: vmfeq.vf v0, v8, ft0
;
; RV64-LABEL: masked_load_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: fmv.d.x ft0, zero
; RV64-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v4f16(<4 x half>* %a, <4 x half>* %m_ptr, <4 x half>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v4f32(<4 x float>* %a, <4 x float>* %m_ptr, <4 x float>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v4f64(<4 x double>* %a, <4 x double>* %m_ptr, <4 x double>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV32-NEXT: vmfeq.vf v0, v8, ft0
;
; RV64-LABEL: masked_load_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; RV64-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: fmv.d.x ft0, zero
; RV64-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v8f16(<8 x half>* %a, <8 x half>* %m_ptr, <8 x half>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v8f32(<8 x float>* %a, <8 x float>* %m_ptr, <8 x float>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v8f64(<8 x double>* %a, <8 x double>* %m_ptr, <8 x double>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV32-NEXT: vmfeq.vf v0, v8, ft0
;
; RV64-LABEL: masked_load_v8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: fmv.d.x ft0, zero
; RV64-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v16f16(<16 x half>* %a, <16 x half>* %m_ptr, <16 x half>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v16f32(<16 x float>* %a, <16 x float>* %m_ptr, <16 x float>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
define void @masked_load_v16f64(<16 x double>* %a, <16 x double>* %m_ptr, <16 x double>* %res_ptr) nounwind {
; RV32-LABEL: masked_load_v16f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle64.v v8, (a1)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV32-NEXT: vmfeq.vf v0, v8, ft0
;
; RV64-LABEL: masked_load_v16f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v8, (a1)
; RV64-NEXT: fmv.d.x ft0, zero
; RV64-NEXT: vmfeq.vf v0, v8, ft0
; CHECK-LABEL: masked_load_v32f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
; CHECK-LABEL: masked_load_v32f32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
; RV32-LABEL: masked_load_v32f64:
; RV32: # %bb.0:
; RV32-NEXT: addi a3, a1, 128
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vle64.v v16, (a1)
; RV32-NEXT: vle64.v v24, (a3)
; RV32-NEXT: fcvt.d.w ft0, zero
; RV64-LABEL: masked_load_v32f64:
; RV64: # %bb.0:
; RV64-NEXT: addi a3, a1, 128
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: vle64.v v24, (a3)
; RV64-NEXT: fmv.d.x ft0, zero
; CHECK-LABEL: masked_load_v64f16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: fmv.h.x ft0, zero
; CHECK-NEXT: vmfeq.vf v0, v8, ft0
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 32
-; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v16, (a1)
; CHECK-NEXT: vle32.v v24, (a3)
; CHECK-NEXT: fmv.w.x ft0, zero
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 64
-; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v16, (a1)
; CHECK-NEXT: vle16.v v24, (a3)
; CHECK-NEXT: fmv.h.x ft0, zero
define void @masked_load_v1i8(<1 x i8>* %a, <1 x i8>* %m_ptr, <1 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
define void @masked_load_v1i16(<1 x i16>* %a, <1 x i16>* %m_ptr, <1 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
define void @masked_load_v1i32(<1 x i32>* %a, <1 x i32>* %m_ptr, <1 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
define void @masked_load_v1i64(<1 x i64>* %a, <1 x i64>* %m_ptr, <1 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
define void @masked_load_v2i8(<2 x i8>* %a, <2 x i8>* %m_ptr, <2 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
define void @masked_load_v2i16(<2 x i16>* %a, <2 x i16>* %m_ptr, <2 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
define void @masked_load_v2i32(<2 x i32>* %a, <2 x i32>* %m_ptr, <2 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
define void @masked_load_v2i64(<2 x i64>* %a, <2 x i64>* %m_ptr, <2 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
define void @masked_load_v4i8(<4 x i8>* %a, <4 x i8>* %m_ptr, <4 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
define void @masked_load_v4i16(<4 x i16>* %a, <4 x i16>* %m_ptr, <4 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
define void @masked_load_v4i32(<4 x i32>* %a, <4 x i32>* %m_ptr, <4 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
define void @masked_load_v4i64(<4 x i64>* %a, <4 x i64>* %m_ptr, <4 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
define void @masked_load_v8i8(<8 x i8>* %a, <8 x i8>* %m_ptr, <8 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
define void @masked_load_v8i16(<8 x i16>* %a, <8 x i16>* %m_ptr, <8 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
define void @masked_load_v8i32(<8 x i32>* %a, <8 x i32>* %m_ptr, <8 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
define void @masked_load_v8i64(<8 x i64>* %a, <8 x i64>* %m_ptr, <8 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
define void @masked_load_v16i8(<16 x i8>* %a, <16 x i8>* %m_ptr, <16 x i8>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
define void @masked_load_v16i16(<16 x i16>* %a, <16 x i16>* %m_ptr, <16 x i16>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
define void @masked_load_v16i32(<16 x i32>* %a, <16 x i32>* %m_ptr, <16 x i32>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
define void @masked_load_v16i64(<16 x i64>* %a, <16 x i64>* %m_ptr, <16 x i64>* %res_ptr) nounwind {
; CHECK-LABEL: masked_load_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-LABEL: masked_load_v32i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-LABEL: masked_load_v32i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-LABEL: masked_load_v32i32:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 32
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; RV32-NEXT: li a1, 32
; RV32-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, 0
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vmseq.vv v8, v0, v24
; RV32-NEXT: vmseq.vv v0, v16, v24
; RV32-NEXT: addi a1, a0, 128
; RV64-LABEL: masked_load_v32i64:
; RV64: # %bb.0:
; RV64-NEXT: addi a3, a1, 128
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vle64.v v16, (a1)
; RV64-NEXT: vle64.v v24, (a3)
; RV64-NEXT: vmseq.vi v8, v16, 0
; CHECK-LABEL: masked_load_v64i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m4, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-LABEL: masked_load_v64i16:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 64
-; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 32
-; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v16, (a1)
; CHECK-NEXT: vle32.v v24, (a3)
; CHECK-NEXT: vmseq.vi v8, v16, 0
; CHECK-LABEL: masked_load_v128i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a3, 128
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a1)
; CHECK-NEXT: vmseq.vi v0, v8, 0
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK: # %bb.0:
; CHECK-NEXT: addi a3, a1, 128
; CHECK-NEXT: li a4, 128
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v16, (a1)
; CHECK-NEXT: vle8.v v24, (a3)
; CHECK-NEXT: vmseq.vi v8, v16, 0
define <2 x i8> @strided_vpload_v2i8_i8(i8* %ptr, i8 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i8_i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i8_i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i8(i8* %ptr, i8 %stride, <2 x i1> %m, i32 %evl)
define <2 x i8> @strided_vpload_v2i8_i16(i8* %ptr, i16 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i8_i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i8_i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i16(i8* %ptr, i16 %stride, <2 x i1> %m, i32 %evl)
define <2 x i8> @strided_vpload_v2i8_i64(i8* %ptr, i64 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i8_i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i8_i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i64(i8* %ptr, i64 %stride, <2 x i1> %m, i32 %evl)
define <2 x i8> @strided_vpload_v2i8(i8* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i8> @llvm.experimental.vp.strided.load.v2i8.p0i8.i32(i8* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x i8> @strided_vpload_v4i8(i8* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x i8> @llvm.experimental.vp.strided.load.v4i8.p0i8.i32(i8* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x i8> @strided_vpload_v8i8(i8* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x i8> @llvm.experimental.vp.strided.load.v8i8.p0i8.i32(i8* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x i16> @strided_vpload_v2i16(i16* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i16> @llvm.experimental.vp.strided.load.v2i16.p0i16.i32(i16* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x i16> @strided_vpload_v4i16(i16* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x i16> @llvm.experimental.vp.strided.load.v4i16.p0i16.i32(i16* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x i16> @strided_vpload_v8i16(i16* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x i16> @llvm.experimental.vp.strided.load.v8i16.p0i16.i32(i16* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x i32> @strided_vpload_v2i32(i32* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i32> @llvm.experimental.vp.strided.load.v2i32.p0i32.i32(i32* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x i32> @strided_vpload_v4i32(i32* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x i32> @llvm.experimental.vp.strided.load.v4i32.p0i32.i32(i32* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x i32> @strided_vpload_v8i32(i32* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x i32> @llvm.experimental.vp.strided.load.v8i32.p0i32.i32(i32* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x i64> @strided_vpload_v2i64(i64* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x i64> @llvm.experimental.vp.strided.load.v2i64.p0i64.i32(i64* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x i64> @strided_vpload_v4i64(i64* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x i64> @llvm.experimental.vp.strided.load.v4i64.p0i64.i32(i64* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x i64> @strided_vpload_v8i64(i64* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x i64> @llvm.experimental.vp.strided.load.v8i64.p0i64.i32(i64* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x half> @strided_vpload_v2f16(half* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x half> @llvm.experimental.vp.strided.load.v2f16.p0f16.i32(half* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x half> @strided_vpload_v4f16(half* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x half> @llvm.experimental.vp.strided.load.v4f16.p0f16.i32(half* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x half> @strided_vpload_v8f16(half* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x half> @llvm.experimental.vp.strided.load.v8f16.p0f16.i32(half* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x float> @strided_vpload_v2f32(float* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x float> @llvm.experimental.vp.strided.load.v2f32.p0f32.i32(float* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x float> @strided_vpload_v4f32(float* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x float> @llvm.experimental.vp.strided.load.v4f32.p0f32.i32(float* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x float> @strided_vpload_v8f32(float* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x float> @llvm.experimental.vp.strided.load.v8f32.p0f32.i32(float* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <2 x double> @strided_vpload_v2f64(double* %ptr, i32 signext %stride, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v2f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v2f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <2 x double> @llvm.experimental.vp.strided.load.v2f64.p0f64.i32(double* %ptr, i32 %stride, <2 x i1> %m, i32 %evl)
define <4 x double> @strided_vpload_v4f64(double* %ptr, i32 signext %stride, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v4f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v4f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <4 x double> @llvm.experimental.vp.strided.load.v4f64.p0f64.i32(double* %ptr, i32 %stride, <4 x i1> %m, i32 %evl)
define <8 x double> @strided_vpload_v8f64(double* %ptr, i32 signext %stride, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v8f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v8f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <8 x double> @llvm.experimental.vp.strided.load.v8f64.p0f64.i32(double* %ptr, i32 %stride, <8 x i1> %m, i32 %evl)
define <3 x double> @strided_vpload_v3f64(double* %ptr, i32 signext %stride, <3 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_v3f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_v3f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%v = call <3 x double> @llvm.experimental.vp.strided.load.v3f64.p0f64.i32(double* %ptr, i32 %stride, <3 x i1> %mask, i32 %evl)
; CHECK-RV32-NEXT: add a4, a0, a4
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
; CHECK-RV64-NEXT: add a4, a0, a4
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-RV32-NEXT: add t0, a1, t0
; CHECK-RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (t0), a2, v0.t
; CHECK-RV32-NEXT: addi t0, a4, -32
; CHECK-RV32-NEXT: li a7, 0
; CHECK-RV32-NEXT: add a3, a1, a3
; CHECK-RV32-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vslidedown.vi v0, v8, 4
-; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a3), a2, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-RV64-NEXT: add t0, a1, t0
; CHECK-RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 2
-; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (t0), a2, v0.t
; CHECK-RV64-NEXT: addi t0, a3, -32
; CHECK-RV64-NEXT: li a7, 0
; CHECK-RV64-NEXT: add a3, a1, a3
; CHECK-RV64-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vslidedown.vi v0, v8, 4
-; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a3), a2, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a5, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a1), a2, v0.t
; CHECK-RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
define <8 x i7> @vadd_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.add.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vadd_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.add.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vadd_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vadd_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
define <4 x i8> @vadd_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.add.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vadd_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vadd_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vadd_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
define <5 x i8> @vadd_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <5 x i8> @llvm.vp.add.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
define <5 x i8> @vadd_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
define <5 x i8> @vadd_vi_v5i8(<5 x i8> %va, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 -1, i32 0
define <8 x i8> @vadd_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.add.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vadd_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vadd_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
define <16 x i8> @vadd_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.add.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vadd_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vadd_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a0, a3
; CHECK-NEXT: .LBB32_2:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB32_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a1, 128
; CHECK-NEXT: .LBB32_4:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vadd_vi_v258i8_evl129:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 128
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vadd_vi_v258i8_evl128:
; CHECK: # %bb.0:
; CHECK-NEXT: li a0, 128
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <256 x i8> poison, i8 -1, i32 0
define <2 x i16> @vadd_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.add.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vadd_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vadd_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
define <4 x i16> @vadd_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.add.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vadd_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vadd_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
define <8 x i16> @vadd_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.add.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vadd_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vadd_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
define <16 x i16> @vadd_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.add.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vadd_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vadd_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
define <2 x i32> @vadd_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.add.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vadd_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vadd_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
define <4 x i32> @vadd_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.add.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vadd_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vadd_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
define <8 x i32> @vadd_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vadd_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vadd_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
define <16 x i32> @vadd_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.add.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vadd_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vadd_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
define <2 x i64> @vadd_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.add.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vadd_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
define <4 x i64> @vadd_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.add.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vadd_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
define <8 x i64> @vadd_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.add.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vadd_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
define <16 x i64> @vadd_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.add.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vadd_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB108_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB108_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB108_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB108_2:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: li a1, 16
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: bltu a0, a1, .LBB108_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB108_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
; RV64-NEXT: ret
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v16, -1
-; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_v32i64_evl12:
; RV64: # %bb.0:
-; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 12, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <32 x i64> poison, i64 -1, i32 0
; RV32-NEXT: li a0, 32
; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vmv.v.i v24, -1
-; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v24, v0.t
-; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, mu
+; RV32-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vadd.vv v16, v16, v24, v0.t
; RV32-NEXT: ret
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v24, v0, 2
-; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 11, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
define <8 x i7> @vand_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.and.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vand_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.and.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vand_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vand_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vand_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 4, i32 0
define <4 x i8> @vand_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.and.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vand_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vand_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 4, i32 0
define <8 x i8> @vand_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.and.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vand_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vand_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 4, i32 0
define <16 x i8> @vand_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.and.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vand_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vand_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 4, i32 0
define <2 x i16> @vand_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.and.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vand_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vand_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 4, i32 0
define <4 x i16> @vand_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.and.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vand_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vand_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 4, i32 0
define <8 x i16> @vand_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.and.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vand_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vand_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 4, i32 0
define <16 x i16> @vand_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.and.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vand_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vand_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 4, i32 0
define <2 x i32> @vand_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.and.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vand_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vand_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 4, i32 0
define <4 x i32> @vand_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.and.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vand_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vand_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 4, i32 0
define <8 x i32> @vand_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.and.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vand_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vand_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 4, i32 0
define <16 x i32> @vand_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.and.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vand_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vand_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 4, i32 0
define <2 x i64> @vand_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.and.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vand.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vand_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 4, i32 0
define <4 x i64> @vand_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.and.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vand_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 4, i32 0
define <8 x i64> @vand_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.and.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vand_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 4, i32 0
define <11 x i64> @vand_vv_v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v11i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <11 x i64> @llvm.vp.and.v11i64(<11 x i64> %va, <11 x i64> %b, <11 x i1> %m, i32 %evl)
; RV32-NEXT: vmv.s.x v0, a1
; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; RV32-NEXT: vmerge.vxm v24, v24, a0, v0
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v16
; RV32-NEXT: vand.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_v11i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <11 x i64> poison, i64 %b, i32 0
define <11 x i64> @vand_vi_v11i64(<11 x i64> %va, <11 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v11i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <11 x i64> poison, i64 4, i32 0
define <16 x i64> @vand_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.and.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vand_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 4, i32 0
define <2 x half> @vfsgnj_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.copysign.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
define <4 x half> @vfsgnj_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.copysign.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
define <8 x half> @vfsgnj_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.copysign.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
define <16 x half> @vfsgnj_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.copysign.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
define <2 x float> @vfsgnj_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.copysign.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
define <4 x float> @vfsgnj_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.copysign.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
define <8 x float> @vfsgnj_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.copysign.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
define <16 x float> @vfsgnj_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.copysign.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
define <2 x double> @vfsgnj_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.copysign.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
define <4 x double> @vfsgnj_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.copysign.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
define <8 x double> @vfsgnj_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.copysign.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
define <15 x double> @vfsgnj_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.copysign.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
define <16 x double> @vfsgnj_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.copysign.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.sdiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vdiv_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.sdiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vdiv_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vdiv_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.sdiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vdiv_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <6 x i8> @vdiv_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v6i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <6 x i8> @llvm.vp.sdiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
define <8 x i8> @vdiv_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.sdiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vdiv_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vdiv_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.sdiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vdiv_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vdiv_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.sdiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vdiv_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vdiv_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.sdiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vdiv_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vdiv_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.sdiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vdiv_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vdiv_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.sdiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vdiv_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vdiv_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.sdiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vdiv_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vdiv_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.sdiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vdiv_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vdiv_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.sdiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vdiv_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vdiv_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.sdiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vdiv_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vdiv_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.sdiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vdiv_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.sdiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vdiv_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.sdiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vdiv_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.sdiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.udiv.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vdivu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.udiv.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vdivu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vdivu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.udiv.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vdivu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <6 x i8> @vdivu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v6i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <6 x i8> @llvm.vp.udiv.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
define <8 x i8> @vdivu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.udiv.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vdivu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vdivu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.udiv.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vdivu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vdivu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.udiv.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vdivu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vdivu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.udiv.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vdivu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vdivu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.udiv.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vdivu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vdivu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.udiv.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vdivu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vdivu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.udiv.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vdivu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vdivu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.udiv.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vdivu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vdivu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.udiv.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vdivu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vdivu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.udiv.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vdivu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vdivu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.udiv.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vdivu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.udiv.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vdivu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.udiv.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vdivu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.udiv.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <2 x half> @vfadd_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fadd.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
define <2 x half> @vfadd_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <3 x half> @vfadd_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fadd.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
define <4 x half> @vfadd_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fadd.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
define <4 x half> @vfadd_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfadd_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fadd.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
define <8 x half> @vfadd_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfadd_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fadd.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
define <16 x half> @vfadd_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfadd_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fadd.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
define <2 x float> @vfadd_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <2 x float> @vfadd_vf_v2f32_commute(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f32_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfadd_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fadd.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
define <4 x float> @vfadd_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfadd_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
define <8 x float> @vfadd_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfadd_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fadd.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
define <16 x float> @vfadd_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfadd_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fadd.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
define <2 x double> @vfadd_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfadd_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fadd.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
define <4 x double> @vfadd_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfadd_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fadd.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
define <8 x double> @vfadd_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfadd_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fadd.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
define <16 x double> @vfadd_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
define <2 x half> @vfdiv_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fdiv.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
define <2 x half> @vfdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <3 x half> @vfdiv_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fdiv.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
define <4 x half> @vfdiv_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fdiv.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
define <4 x half> @vfdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfdiv_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fdiv.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
define <8 x half> @vfdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfdiv_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fdiv.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
define <16 x half> @vfdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfdiv_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fdiv.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
define <2 x float> @vfdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfdiv_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fdiv.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
define <4 x float> @vfdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfdiv_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fdiv.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
define <8 x float> @vfdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfdiv_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fdiv.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
define <16 x float> @vfdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfdiv_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fdiv.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
define <2 x double> @vfdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfdiv_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fdiv.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
define <4 x double> @vfdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfdiv_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fdiv.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
define <8 x double> @vfdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfdiv_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fdiv.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
define <16 x double> @vfdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
define <2 x half> @vfmax_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.maxnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
define <4 x half> @vfmax_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.maxnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
define <8 x half> @vfmax_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.maxnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
define <16 x half> @vfmax_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.maxnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
define <2 x float> @vfmax_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.maxnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
define <4 x float> @vfmax_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.maxnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
define <8 x float> @vfmax_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.maxnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
define <16 x float> @vfmax_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.maxnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
define <2 x double> @vfmax_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.maxnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
define <4 x double> @vfmax_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.maxnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
define <8 x double> @vfmax_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.maxnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
define <15 x double> @vfmax_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.maxnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
define <16 x double> @vfmax_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.maxnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
define <2 x half> @vfmin_vv_v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.minnum.v2f16(<2 x half> %va, <2 x half> %vb, <2 x i1> %m, i32 %evl)
define <4 x half> @vfmin_vv_v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.minnum.v4f16(<4 x half> %va, <4 x half> %vb, <4 x i1> %m, i32 %evl)
define <8 x half> @vfmin_vv_v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.minnum.v8f16(<8 x half> %va, <8 x half> %vb, <8 x i1> %m, i32 %evl)
define <16 x half> @vfmin_vv_v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.minnum.v16f16(<16 x half> %va, <16 x half> %vb, <16 x i1> %m, i32 %evl)
define <2 x float> @vfmin_vv_v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.minnum.v2f32(<2 x float> %va, <2 x float> %vb, <2 x i1> %m, i32 %evl)
define <4 x float> @vfmin_vv_v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.minnum.v4f32(<4 x float> %va, <4 x float> %vb, <4 x i1> %m, i32 %evl)
define <8 x float> @vfmin_vv_v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.minnum.v8f32(<8 x float> %va, <8 x float> %vb, <8 x i1> %m, i32 %evl)
define <16 x float> @vfmin_vv_v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.minnum.v16f32(<16 x float> %va, <16 x float> %vb, <16 x i1> %m, i32 %evl)
define <2 x double> @vfmin_vv_v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.minnum.v2f64(<2 x double> %va, <2 x double> %vb, <2 x i1> %m, i32 %evl)
define <4 x double> @vfmin_vv_v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.minnum.v4f64(<4 x double> %va, <4 x double> %vb, <4 x i1> %m, i32 %evl)
define <8 x double> @vfmin_vv_v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.minnum.v8f64(<8 x double> %va, <8 x double> %vb, <8 x i1> %m, i32 %evl)
define <15 x double> @vfmin_vv_v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v15f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <15 x double> @llvm.vp.minnum.v15f64(<15 x double> %va, <15 x double> %vb, <15 x i1> %m, i32 %evl)
define <16 x double> @vfmin_vv_v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.minnum.v16f64(<16 x double> %va, <16 x double> %vb, <16 x i1> %m, i32 %evl)
; CHECK-NEXT: vle64.v v8, (a0)
; CHECK-NEXT: addi a0, sp, 16
; CHECK-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: li a0, 16
; CHECK-NEXT: csrr a1, vlenb
; CHECK-NEXT: slli a1, a1, 3
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: .LBB26_4:
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v1
; CHECK-NEXT: csrr a0, vlenb
; CHECK-NEXT: slli a0, a0, 4
define <2 x half> @vfmul_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fmul.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
define <2 x half> @vfmul_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <3 x half> @vfmul_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fmul.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
define <4 x half> @vfmul_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fmul.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
define <4 x half> @vfmul_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfmul_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fmul.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
define <8 x half> @vfmul_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfmul_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fmul.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
define <16 x half> @vfmul_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfmul_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fmul.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
define <2 x float> @vfmul_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfmul_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fmul.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
define <4 x float> @vfmul_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfmul_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fmul.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
define <8 x float> @vfmul_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfmul_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fmul.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
define <16 x float> @vfmul_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfmul_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fmul.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
define <2 x double> @vfmul_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfmul_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fmul.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
define <4 x double> @vfmul_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfmul_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fmul.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
define <8 x double> @vfmul_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfmul_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fmul.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
define <16 x double> @vfmul_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
define <2 x half> @vfrdiv_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <4 x half> @vfrdiv_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfrdiv_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfrdiv_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfrdiv_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfrdiv_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfrdiv_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfrdiv_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfrdiv_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfrdiv_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfrdiv_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfrdiv_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
define <2 x half> @vfrsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <4 x half> @vfrsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfrsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfrsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfrsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfrsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfrsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfrsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfrsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfrsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfrsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfrsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
define <2 x half> @vfsub_vv_v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x half> @llvm.vp.fsub.v2f16(<2 x half> %va, <2 x half> %b, <2 x i1> %m, i32 %evl)
define <2 x half> @vfsub_vf_v2f16(<2 x half> %va, half %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x half> poison, half %b, i32 0
define <3 x half> @vfsub_vv_v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v3f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x half> @llvm.vp.fsub.v3f16(<3 x half> %va, <3 x half> %b, <3 x i1> %m, i32 %evl)
define <4 x half> @vfsub_vv_v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x half> @llvm.vp.fsub.v4f16(<4 x half> %va, <4 x half> %b, <4 x i1> %m, i32 %evl)
define <4 x half> @vfsub_vf_v4f16(<4 x half> %va, half %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x half> poison, half %b, i32 0
define <8 x half> @vfsub_vv_v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x half> @llvm.vp.fsub.v8f16(<8 x half> %va, <8 x half> %b, <8 x i1> %m, i32 %evl)
define <8 x half> @vfsub_vf_v8f16(<8 x half> %va, half %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x half> poison, half %b, i32 0
define <16 x half> @vfsub_vv_v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x half> @llvm.vp.fsub.v16f16(<16 x half> %va, <16 x half> %b, <16 x i1> %m, i32 %evl)
define <16 x half> @vfsub_vf_v16f16(<16 x half> %va, half %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x half> poison, half %b, i32 0
define <2 x float> @vfsub_vv_v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x float> @llvm.vp.fsub.v2f32(<2 x float> %va, <2 x float> %b, <2 x i1> %m, i32 %evl)
define <2 x float> @vfsub_vf_v2f32(<2 x float> %va, float %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x float> poison, float %b, i32 0
define <4 x float> @vfsub_vv_v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x float> @llvm.vp.fsub.v4f32(<4 x float> %va, <4 x float> %b, <4 x i1> %m, i32 %evl)
define <4 x float> @vfsub_vf_v4f32(<4 x float> %va, float %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x float> poison, float %b, i32 0
define <8 x float> @vfsub_vv_v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x float> @llvm.vp.fsub.v8f32(<8 x float> %va, <8 x float> %b, <8 x i1> %m, i32 %evl)
define <8 x float> @vfsub_vf_v8f32(<8 x float> %va, float %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x float> poison, float %b, i32 0
define <16 x float> @vfsub_vv_v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x float> @llvm.vp.fsub.v16f32(<16 x float> %va, <16 x float> %b, <16 x i1> %m, i32 %evl)
define <16 x float> @vfsub_vf_v16f32(<16 x float> %va, float %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x float> poison, float %b, i32 0
define <2 x double> @vfsub_vv_v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x double> @llvm.vp.fsub.v2f64(<2 x double> %va, <2 x double> %b, <2 x i1> %m, i32 %evl)
define <2 x double> @vfsub_vf_v2f64(<2 x double> %va, double %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x double> poison, double %b, i32 0
define <4 x double> @vfsub_vv_v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x double> @llvm.vp.fsub.v4f64(<4 x double> %va, <4 x double> %b, <4 x i1> %m, i32 %evl)
define <4 x double> @vfsub_vf_v4f64(<4 x double> %va, double %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x double> poison, double %b, i32 0
define <8 x double> @vfsub_vv_v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x double> @llvm.vp.fsub.v8f64(<8 x double> %va, <8 x double> %b, <8 x i1> %m, i32 %evl)
define <8 x double> @vfsub_vf_v8f64(<8 x double> %va, double %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x double> poison, double %b, i32 0
define <16 x double> @vfsub_vv_v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x double> @llvm.vp.fsub.v16f64(<16 x double> %va, <16 x double> %b, <16 x i1> %m, i32 %evl)
define <16 x double> @vfsub_vf_v16f64(<16 x double> %va, double %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_v16f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x double> poison, double %b, i32 0
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.smax.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vmax_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.smax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vmax_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmax_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.smax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vmax_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmax_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <5 x i8> @vmax_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <5 x i8> @llvm.vp.smax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
define <5 x i8> @vmax_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
define <8 x i8> @vmax_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.smax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vmax_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vmax_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.smax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vmax_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a4
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB22_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_4:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmax_vx_v258i8_evl129:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmax_vx_v258i8_evl128:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <2 x i16> @vmax_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.smax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vmax_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vmax_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.smax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vmax_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vmax_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.smax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vmax_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vmax_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.smax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vmax_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vmax_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.smax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vmax_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vmax_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.smax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vmax_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vmax_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.smax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vmax_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vmax_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.smax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vmax_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vmax_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.smax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vmax_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.smax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vmax_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.smax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vmax_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.smax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB74_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmax.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmax.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: mv a2, a1
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vmax.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmax.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.umax.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vmaxu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.umax.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vmaxu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmaxu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.umax.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vmaxu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmaxu_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <5 x i8> @vmaxu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <5 x i8> @llvm.vp.umax.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
define <5 x i8> @vmaxu_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
define <8 x i8> @vmaxu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.umax.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vmaxu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vmaxu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.umax.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vmaxu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a4
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB22_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_4:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmaxu_vx_v258i8_evl129:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmaxu_vx_v258i8_evl128:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <2 x i16> @vmaxu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.umax.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vmaxu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vmaxu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.umax.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vmaxu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vmaxu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.umax.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vmaxu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vmaxu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.umax.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vmaxu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vmaxu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.umax.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vmaxu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vmaxu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.umax.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vmaxu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vmaxu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.umax.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vmaxu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vmaxu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.umax.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vmaxu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vmaxu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.umax.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vmaxu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.umax.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vmaxu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.umax.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vmaxu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.umax.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB74_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmaxu.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmaxu.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: mv a2, a1
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vmaxu.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmaxu.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.smin.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vmin_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.smin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vmin_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmin_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.smin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vmin_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmin_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <5 x i8> @vmin_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <5 x i8> @llvm.vp.smin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
define <5 x i8> @vmin_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
define <8 x i8> @vmin_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.smin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vmin_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vmin_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.smin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vmin_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a4
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB22_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_4:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmin_vx_v258i8_evl129:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vmin_vx_v258i8_evl128:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <2 x i16> @vmin_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.smin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vmin_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vmin_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.smin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vmin_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vmin_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.smin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vmin_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vmin_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.smin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vmin_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vmin_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.smin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vmin_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vmin_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.smin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vmin_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vmin_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.smin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vmin_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vmin_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.smin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vmin_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vmin_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.smin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vmin_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.smin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vmin_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.smin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vmin_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.smin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB74_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vmin.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vmin.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: mv a2, a1
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vmin.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vmin.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.umin.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vminu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.umin.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vminu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vminu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.umin.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vminu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vminu_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <5 x i8> @vminu_vv_v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <5 x i8> @llvm.vp.umin.v5i8(<5 x i8> %va, <5 x i8> %b, <5 x i1> %m, i32 %evl)
define <5 x i8> @vminu_vx_v5i8(<5 x i8> %va, i8 %b, <5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <5 x i8> poison, i8 %b, i32 0
define <8 x i8> @vminu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.umin.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vminu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vminu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.umin.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vminu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a1, a4
; CHECK-NEXT: .LBB22_2:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v25
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a2, a3, .LBB22_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a2, 128
; CHECK-NEXT: .LBB22_4:
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vminu_vx_v258i8_evl129:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 1, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
; CHECK-LABEL: vminu_vx_v258i8_evl128:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 128
-; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <2 x i16> @vminu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.umin.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vminu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vminu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.umin.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vminu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vminu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.umin.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vminu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vminu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.umin.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vminu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vminu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.umin.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vminu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vminu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.umin.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vminu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vminu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.umin.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vminu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vminu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.umin.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vminu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vminu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.umin.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vminu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.umin.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vminu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.umin.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vminu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.umin.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB74_2:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: li a1, 16
; RV32-NEXT: vminu.vv v16, v16, v24, v0.t
; RV32-NEXT: bltu a0, a1, .LBB74_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB74_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vminu.vv v8, v8, v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: mv a2, a1
; RV64-NEXT: .LBB74_2:
; RV64-NEXT: li a1, -1
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: li a2, 16
; RV64-NEXT: vminu.vx v16, v16, a1, v0.t
; RV64-NEXT: bltu a0, a2, .LBB74_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB74_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vminu.vx v8, v8, a1, v0.t
; RV64-NEXT: ret
define <8 x i7> @vmul_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.mul.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vmul_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.mul.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vmul_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vmul_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.mul.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vmul_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <8 x i8> @vmul_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.mul.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vmul_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vmul_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.mul.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vmul_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vmul_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.mul.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vmul_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vmul_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.mul.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vmul_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vmul_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.mul.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vmul_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vmul_vx_v8i16_commute(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v8i16_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <12 x i16> @vmul_vv_v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v12i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <12 x i16> @llvm.vp.mul.v12i16(<12 x i16> %va, <12 x i16> %b, <12 x i1> %m, i32 %evl)
define <12 x i16> @vmul_vx_v12i16(<12 x i16> %va, i16 %b, <12 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v12i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <12 x i16> poison, i16 %b, i32 0
define <16 x i16> @vmul_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.mul.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vmul_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vmul_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.mul.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vmul_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vmul_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.mul.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vmul_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vmul_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.mul.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vmul_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vmul_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.mul.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vmul_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vmul_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.mul.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vmul_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.mul.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vmul_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.mul.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vmul_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.mul.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <8 x i7> @vor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.or.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.or.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 5, i32 0
define <4 x i8> @vor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.or.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vor_vx_v4i8_commute(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v4i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 5, i32 0
define <7 x i8> @vor_vv_v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <7 x i8> @llvm.vp.or.v5i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
define <7 x i8> @vor_vx_v5i8(<7 x i8> %va, i8 %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <7 x i8> poison, i8 %b, i32 0
define <7 x i8> @vor_vi_v5i8(<7 x i8> %va, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <7 x i8> poison, i8 5, i32 0
define <8 x i8> @vor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.or.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vor_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 5, i32 0
define <16 x i8> @vor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.or.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 5, i32 0
define <2 x i16> @vor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.or.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 5, i32 0
define <4 x i16> @vor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.or.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 5, i32 0
define <8 x i16> @vor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.or.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 5, i32 0
define <16 x i16> @vor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.or.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 5, i32 0
define <2 x i32> @vor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.or.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 5, i32 0
define <4 x i32> @vor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.or.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 5, i32 0
define <8 x i32> @vor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.or.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 5, i32 0
define <16 x i32> @vor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.or.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 5, i32 0
define <2 x i64> @vor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.or.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 5, i32 0
define <4 x i64> @vor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.or.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 5, i32 0
define <8 x i64> @vor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.or.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 5, i32 0
define <16 x i64> @vor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.or.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 5, i32 0
define <2 x i8> @vpgather_v2i8(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <2 x i16> @vpgather_v2i8_sextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_sextload_v2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i8_sextload_v2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV64-NEXT: vsext.vf2 v8, v9
define <2 x i16> @vpgather_v2i8_zextload_v2i16(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_zextload_v2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i8_zextload_v2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; RV64-NEXT: vzext.vf2 v8, v9
define <2 x i32> @vpgather_v2i8_sextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_sextload_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vsext.vf4 v8, v9
;
; RV64-LABEL: vpgather_v2i8_sextload_v2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vsext.vf4 v8, v9
define <2 x i32> @vpgather_v2i8_zextload_v2i32(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_zextload_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vzext.vf4 v8, v9
;
; RV64-LABEL: vpgather_v2i8_zextload_v2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vzext.vf4 v8, v9
define <2 x i64> @vpgather_v2i8_sextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_sextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsext.vf8 v8, v9
;
; RV64-LABEL: vpgather_v2i8_sextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsext.vf8 v8, v9
define <2 x i64> @vpgather_v2i8_zextload_v2i64(<2 x i8*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i8_zextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vzext.vf8 v8, v9
;
; RV64-LABEL: vpgather_v2i8_zextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vzext.vf8 v8, v9
define <3 x i8> @vpgather_v3i8(<3 x i8*> %ptrs, <3 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v3i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v3i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <4 x i8> @vpgather_v4i8(<4 x i8*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <8 x i8> @vpgather_v8i8(<8 x i8*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
-; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
-; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <8 x i8> %idxs
; RV32-NEXT: li a2, 32
; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v16, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsext.vf8 v16, v12
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB13_4
; RV64-NEXT: .LBB13_4:
; RV64-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: li a0, 32
define <2 x i16> @vpgather_v2i16(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <2 x i32> @vpgather_v2i16_sextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i16_sextload_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i16_sextload_v2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vsext.vf2 v8, v9
define <2 x i32> @vpgather_v2i16_zextload_v2i32(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i16_zextload_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i16_zextload_v2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; RV64-NEXT: vzext.vf2 v8, v9
define <2 x i64> @vpgather_v2i16_sextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i16_sextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsext.vf4 v8, v9
;
; RV64-LABEL: vpgather_v2i16_sextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsext.vf4 v8, v9
define <2 x i64> @vpgather_v2i16_zextload_v2i64(<2 x i16*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i16_zextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vzext.vf4 v8, v9
;
; RV64-LABEL: vpgather_v2i16_zextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vzext.vf4 v8, v9
define <4 x i16> @vpgather_v4i16(<4 x i16*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <8 x i16> @vpgather_v8i16(<8 x i16*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i16>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i16>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <8 x i16> %idxs
define <2 x i32> @vpgather_v2i32(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <2 x i64> @vpgather_v2i32_sextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i32_sextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i32_sextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vsext.vf2 v8, v9
define <2 x i64> @vpgather_v2i32_zextload_v2i64(<2 x i32*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i32_zextload_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
;
; RV64-LABEL: vpgather_v2i32_zextload_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV64-NEXT: vzext.vf2 v8, v9
define <4 x i32> @vpgather_v4i32(<4 x i32*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
define <8 x i32> @vpgather_v8i32(<8 x i32*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <8 x i16> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <8 x i32> %idxs
define <2 x i64> @vpgather_v2i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <2 x i64> @llvm.vp.gather.v2i64.v2p0i64(<2 x i64*> %ptrs, <2 x i1> %m, i32 %evl)
define <4 x i64> @vpgather_v4i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <4 x i64> @llvm.vp.gather.v4i64.v4p0i64(<4 x i64*> %ptrs, <4 x i1> %m, i32 %evl)
define <8 x i64> @vpgather_v8i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <8 x i64> @llvm.vp.gather.v8i64.v8p0i64(<8 x i64*> %ptrs, <8 x i1> %m, i32 %evl)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <8 x i16> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <8 x i32> %idxs
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i32> %idxs to <8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i32> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vnsrl.wi v12, v8, 0
; RV32-NEXT: vsll.vi v12, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <8 x i64> %idxs
define <2 x half> @vpgather_v2f16(<2 x half*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <4 x half> @vpgather_v4f16(<4 x half*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <8 x half> @vpgather_v8f16(<8 x half*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i16>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i16>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vadd.vv v10, v10, v10
-; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v10, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vadd.vv v12, v12, v12
-; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <8 x i16> %idxs
define <2 x float> @vpgather_v2f32(<2 x float*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <4 x float> @vpgather_v4f32(<4 x float*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
define <8 x float> @vpgather_v8f32(<8 x float*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <8 x i16> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i32>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf2 v10, v8
; RV32-NEXT: vsll.vi v8, v10, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i32>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v12, v12, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v12, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <8 x i32> %idxs
define <2 x double> @vpgather_v2f64(<2 x double*> %ptrs, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <2 x double> @llvm.vp.gather.v2f64.v2p0f64(<2 x double*> %ptrs, <2 x i1> %m, i32 %evl)
define <4 x double> @vpgather_v4f64(<4 x double*> %ptrs, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %m, i32 %evl)
define <8 x double> @vpgather_v8f64(<8 x double*> %ptrs, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_v8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_v8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <8 x double> @llvm.vp.gather.v8f64.v8p0f64(<8 x double*> %ptrs, <8 x i1> %m, i32 %evl)
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <8 x i8> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i8> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf4 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf8 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i8> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <8 x i16> %idxs
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i16> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vzext.vf2 v10, v8
; RV32-NEXT: vsll.vi v12, v10, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf4 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i16> %idxs to <8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <8 x i32> %idxs
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <8 x i32> %idxs to <8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vsll.vi v12, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vzext.vf2 v12, v8
; RV64-NEXT: vsll.vi v8, v12, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <8 x i32> %idxs to <8 x i64>
; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; RV32-NEXT: vnsrl.wi v12, v8, 0
; RV32-NEXT: vsll.vi v12, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <8 x i64> %idxs
; RV32-NEXT: vslidedown.vi v24, v8, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v1, 2
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v24, v0.t
; RV32-NEXT: li a1, 16
; RV32-NEXT: bltu a0, a1, .LBB86_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a0, 16
; RV32-NEXT: .LBB86_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v24
; RV64-NEXT: .LBB86_2:
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: li a1, 16
; RV64-NEXT: bltu a0, a1, .LBB86_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a0, 16
; RV64-NEXT: .LBB86_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB87_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB87_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB87_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB88_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB88_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB88_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB89_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v10, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB89_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB89_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v10
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB90_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB90_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB90_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB91_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB91_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB91_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB92_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v0, v0, 2
; RV32-NEXT: vsetivli zero, 16, e32, m8, ta, ma
; RV32-NEXT: vslidedown.vi v24, v16, 16
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v24, v24, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v12, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB92_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB92_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB93_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsll.vi v8, v0, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB93_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB93_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB94_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB94_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB94_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV32-NEXT: # %bb.1:
; RV32-NEXT: li a2, 16
; RV32-NEXT: .LBB95_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: addi a3, a1, -16
; RV32-NEXT: li a2, 0
; RV32-NEXT: vslidedown.vi v24, v16, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v0, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: addi a3, sp, 16
; RV64-NEXT: vl1r.v v24, (a3) # Unknown-size Folded Reload
; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB95_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB95_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: csrr a0, vlenb
; RV32-NEXT: vslidedown.vi v8, v24, 16
; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV32-NEXT: vslidedown.vi v0, v1, 2
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v8, v0.t
; RV32-NEXT: li a2, 16
; RV32-NEXT: bltu a1, a2, .LBB96_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: li a1, 16
; RV32-NEXT: .LBB96_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v1
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: vsll.vi v8, v8, 3
; RV64-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; RV64-NEXT: vslidedown.vi v0, v24, 2
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: li a2, 16
; RV64-NEXT: bltu a1, a2, .LBB96_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: li a1, 16
; RV64-NEXT: .LBB96_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
define <2 x i8> @vpload_v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i8> @llvm.vp.load.v2i8.p0v2i8(<2 x i8>* %ptr, <2 x i1> %m, i32 %evl)
define <3 x i8> @vpload_v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <3 x i8> @llvm.vp.load.v3i8.p0v3i8(<3 x i8>* %ptr, <3 x i1> %m, i32 %evl)
define <4 x i8> @vpload_v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i8> @llvm.vp.load.v4i8.p0v4i8(<4 x i8>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x i8> @vpload_v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x i16> @vpload_v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i16> @llvm.vp.load.v2i16.p0v2i16(<2 x i16>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x i16> @vpload_v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i16> @llvm.vp.load.v4i16.p0v4i16(<4 x i16>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x i16> @vpload_v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i16> @llvm.vp.load.v8i16.p0v8i16(<8 x i16>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x i32> @vpload_v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i32> @llvm.vp.load.v2i32.p0v2i32(<2 x i32>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x i32> @vpload_v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i32> @llvm.vp.load.v4i32.p0v4i32(<4 x i32>* %ptr, <4 x i1> %m, i32 %evl)
define <6 x i32> @vpload_v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v6i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <6 x i32> @llvm.vp.load.v6i32.p0v6i32(<6 x i32>* %ptr, <6 x i1> %m, i32 %evl)
define <8 x i32> @vpload_v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x i64> @vpload_v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x i64> @llvm.vp.load.v2i64.p0v2i64(<2 x i64>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x i64> @vpload_v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x i64> @llvm.vp.load.v4i64.p0v4i64(<4 x i64>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x i64> @vpload_v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x i64> @llvm.vp.load.v8i64.p0v8i64(<8 x i64>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x half> @vpload_v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x half> @llvm.vp.load.v2f16.p0v2f16(<2 x half>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x half> @vpload_v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x half> @llvm.vp.load.v4f16.p0v4f16(<4 x half>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x half> @vpload_v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x half> @llvm.vp.load.v8f16.p0v8f16(<8 x half>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x float> @vpload_v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x float> @llvm.vp.load.v2f32.p0v2f32(<2 x float>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x float> @vpload_v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x float> @vpload_v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x float> @llvm.vp.load.v8f32.p0v8f32(<8 x float>* %ptr, <8 x i1> %m, i32 %evl)
define <2 x double> @vpload_v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <2 x double> @llvm.vp.load.v2f64.p0v2f64(<2 x double>* %ptr, <2 x i1> %m, i32 %evl)
define <4 x double> @vpload_v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <4 x double> @llvm.vp.load.v4f64.p0v4f64(<4 x double>* %ptr, <4 x i1> %m, i32 %evl)
define <8 x double> @vpload_v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_v8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <8 x double> @llvm.vp.load.v8f64.p0v8f64(<8 x double>* %ptr, <8 x i1> %m, i32 %evl)
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: addi a3, a0, 128
-; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a3), v0.t
; CHECK-NEXT: li a2, 16
; CHECK-NEXT: bltu a1, a2, .LBB31_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: li a1, 16
; CHECK-NEXT: .LBB31_4:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 2
; CHECK-NEXT: addi a5, a1, 128
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a5), v0.t
; CHECK-NEXT: addi a5, a2, -32
; CHECK-NEXT: li a4, 0
; CHECK-NEXT: vsetivli zero, 4, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vi v0, v8, 4
; CHECK-NEXT: addi a5, a1, 256
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a5), v0.t
; CHECK-NEXT: bltu a3, a2, .LBB32_10
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: li a3, 16
; CHECK-NEXT: .LBB32_10:
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a1), v0.t
; CHECK-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; CHECK-NEXT: vsra.vi v9, v9, 1
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.srem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vrem_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.srem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vrem_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vrem_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.srem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vrem_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <6 x i8> @vrem_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v6i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <6 x i8> @llvm.vp.srem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
define <8 x i8> @vrem_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.srem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vrem_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vrem_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.srem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vrem_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vrem_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.srem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vrem_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vrem_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.srem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vrem_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vrem_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.srem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vrem_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vrem_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.srem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vrem_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vrem_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.srem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vrem_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vrem_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.srem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vrem_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vrem_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.srem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vrem_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vrem_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.srem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vrem_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vrem_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.srem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vrem_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.srem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vrem_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.srem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vrem_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.srem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.urem.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vremu_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.urem.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vremu_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <4 x i8> @vremu_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.urem.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vremu_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <6 x i8> @vremu_vv_v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v6i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <6 x i8> @llvm.vp.urem.v6i8(<6 x i8> %va, <6 x i8> %b, <6 x i1> %m, i32 %evl)
define <8 x i8> @vremu_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.urem.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vremu_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vremu_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.urem.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vremu_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vremu_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.urem.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vremu_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vremu_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.urem.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vremu_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vremu_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.urem.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vremu_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vremu_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.urem.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vremu_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vremu_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.urem.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vremu_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vremu_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.urem.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vremu_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vremu_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.urem.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vremu_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vremu_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.urem.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vremu_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vremu_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.urem.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vremu_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.urem.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vremu_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.urem.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vremu_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.urem.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <2 x i8> @vrsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vrsub_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 2, i32 0
define <4 x i8> @vrsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vrsub_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 2, i32 0
define <8 x i8> @vrsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vrsub_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 2, i32 0
define <16 x i8> @vrsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vrsub_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 2, i32 0
define <2 x i16> @vrsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vrsub_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 2, i32 0
define <4 x i16> @vrsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vrsub_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 2, i32 0
define <8 x i16> @vrsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vrsub_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 2, i32 0
define <16 x i16> @vrsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vrsub_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 2, i32 0
define <2 x i32> @vrsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vrsub_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 2, i32 0
define <4 x i32> @vrsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vrsub_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 2, i32 0
define <8 x i32> @vrsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vrsub_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 2, i32 0
define <16 x i32> @vrsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vrsub_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsub.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vrsub_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsub.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vrsub_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsub.vv v8, v12, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vrsub_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vrsub_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 2, i32 0
; CHECK-NEXT: li a1, 127
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.shl.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vsll_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.shl.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vsll_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vsll_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 3, i32 0
define <3 x i8> @vsll_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x i8> @llvm.vp.shl.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
define <4 x i8> @vsll_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.shl.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vsll_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vsll_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 3, i32 0
define <8 x i8> @vsll_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.shl.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vsll_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vsll_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 3, i32 0
define <16 x i8> @vsll_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.shl.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vsll_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vsll_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 3, i32 0
define <2 x i16> @vsll_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.shl.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vsll_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vsll_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 3, i32 0
define <4 x i16> @vsll_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.shl.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vsll_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vsll_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 3, i32 0
define <8 x i16> @vsll_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.shl.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vsll_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vsll_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 3, i32 0
define <16 x i16> @vsll_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.shl.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vsll_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vsll_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 3, i32 0
define <2 x i32> @vsll_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.shl.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vsll_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vsll_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 3, i32 0
define <4 x i32> @vsll_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.shl.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vsll_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vsll_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 3, i32 0
define <8 x i32> @vsll_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.shl.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vsll_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vsll_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 3, i32 0
define <16 x i32> @vsll_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.shl.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vsll_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vsll_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 3, i32 0
define <2 x i64> @vsll_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.shl.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
define <2 x i64> @vsll_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vsll_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 3, i32 0
define <4 x i64> @vsll_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.shl.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
define <4 x i64> @vsll_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_v4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vsll_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 3, i32 0
define <8 x i64> @vsll_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.shl.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
define <8 x i64> @vsll_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_v8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vsll_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 3, i32 0
define <16 x i64> @vsll_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.shl.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
define <16 x i64> @vsll_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vsll_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 3, i32 0
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vadd.vv v8, v8, v8
; CHECK-NEXT: vsra.vi v8, v8, 1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.ashr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vsra_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.ashr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vsra_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vsra_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 5, i32 0
define <4 x i8> @vsra_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.ashr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vsra_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vsra_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 5, i32 0
define <7 x i8> @vsra_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v7i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <7 x i8> @llvm.vp.ashr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
define <8 x i8> @vsra_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.ashr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vsra_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vsra_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 5, i32 0
define <16 x i8> @vsra_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.ashr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vsra_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vsra_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 5, i32 0
define <2 x i16> @vsra_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.ashr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vsra_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vsra_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 5, i32 0
define <4 x i16> @vsra_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.ashr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vsra_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vsra_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 5, i32 0
define <8 x i16> @vsra_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.ashr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vsra_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vsra_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 5, i32 0
define <16 x i16> @vsra_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.ashr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vsra_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vsra_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 5, i32 0
define <2 x i32> @vsra_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.ashr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vsra_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vsra_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 5, i32 0
define <4 x i32> @vsra_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.ashr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vsra_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vsra_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 5, i32 0
define <8 x i32> @vsra_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.ashr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vsra_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vsra_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 5, i32 0
define <16 x i32> @vsra_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.ashr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vsra_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vsra_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 5, i32 0
define <2 x i64> @vsra_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.ashr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
define <2 x i64> @vsra_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vsra_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 5, i32 0
define <4 x i64> @vsra_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.ashr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
define <4 x i64> @vsra_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vsra_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 5, i32 0
define <8 x i64> @vsra_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.ashr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
define <8 x i64> @vsra_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vsra_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 5, i32 0
define <16 x i64> @vsra_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.ashr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
define <16 x i64> @vsra_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vsra_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 5, i32 0
; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v9, v9, a1
; CHECK-NEXT: vand.vx v8, v8, a1
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.lshr.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vsrl_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.lshr.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vsrl_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vsrl_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 4, i32 0
define <4 x i8> @vsrl_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.lshr.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vsrl_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vsrl_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 4, i32 0
define <7 x i8> @vsrl_vv_v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v7i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <7 x i8> @llvm.vp.lshr.v7i8(<7 x i8> %va, <7 x i8> %b, <7 x i1> %m, i32 %evl)
define <8 x i8> @vsrl_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.lshr.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vsrl_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vsrl_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 4, i32 0
define <16 x i8> @vsrl_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.lshr.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vsrl_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vsrl_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 4, i32 0
define <2 x i16> @vsrl_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.lshr.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vsrl_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vsrl_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 4, i32 0
define <4 x i16> @vsrl_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.lshr.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vsrl_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vsrl_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 4, i32 0
define <8 x i16> @vsrl_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.lshr.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vsrl_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vsrl_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 4, i32 0
define <16 x i16> @vsrl_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.lshr.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vsrl_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vsrl_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 4, i32 0
define <2 x i32> @vsrl_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.lshr.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vsrl_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vsrl_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 4, i32 0
define <4 x i32> @vsrl_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.lshr.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vsrl_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vsrl_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 4, i32 0
define <8 x i32> @vsrl_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.lshr.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vsrl_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vsrl_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 4, i32 0
define <16 x i32> @vsrl_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.lshr.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vsrl_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vsrl_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 4, i32 0
define <2 x i64> @vsrl_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.lshr.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
define <2 x i64> @vsrl_vx_v2i64(<2 x i64> %va, i64 %b, <2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vsrl_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 4, i32 0
define <4 x i64> @vsrl_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.lshr.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
define <4 x i64> @vsrl_vx_v4i64(<4 x i64> %va, i64 %b, <4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vsrl_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 4, i32 0
define <8 x i64> @vsrl_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.lshr.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
define <8 x i64> @vsrl_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vsrl_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 4, i32 0
define <16 x i64> @vsrl_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.lshr.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
define <16 x i64> @vsrl_vx_v16i64(<16 x i64> %va, i64 %b, <16 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_v16i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vsrl_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 4, i32 0
define <8 x i7> @vsub_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.sub.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vsub_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.sub.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vsub_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <3 x i8> @vsub_vv_v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <3 x i8> @llvm.vp.sub.v3i8(<3 x i8> %va, <3 x i8> %b, <3 x i1> %m, i32 %evl)
define <3 x i8> @vsub_vx_v3i8(<3 x i8> %va, i8 %b, <3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <3 x i8> poison, i8 %b, i32 0
define <4 x i8> @vsub_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.sub.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vsub_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <8 x i8> @vsub_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.sub.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vsub_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <16 x i8> @vsub_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.sub.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vsub_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <2 x i16> @vsub_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.sub.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vsub_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <4 x i16> @vsub_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.sub.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vsub_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <8 x i16> @vsub_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.sub.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vsub_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <16 x i16> @vsub_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.sub.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vsub_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <2 x i32> @vsub_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.sub.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vsub_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <4 x i32> @vsub_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.sub.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vsub_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <8 x i32> @vsub_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.sub.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vsub_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <16 x i32> @vsub_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.sub.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vsub_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <2 x i64> @vsub_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.sub.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <4 x i64> @vsub_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.sub.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <8 x i64> @vsub_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.sub.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <16 x i64> @vsub_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.sub.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <8 x i7> @vxor_vv_v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i7> @llvm.vp.xor.v8i7(<8 x i7> %va, <8 x i7> %b, <8 x i1> %m, i32 %evl)
define <2 x i8> @vxor_vv_v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i8> @llvm.vp.xor.v2i8(<2 x i8> %va, <2 x i8> %b, <2 x i1> %m, i32 %evl)
define <2 x i8> @vxor_vx_v2i8(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vxor_vx_v2i8_commute(<2 x i8> %va, i8 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 %b, i32 0
define <2 x i8> @vxor_vi_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 7, i32 0
define <2 x i8> @vxor_vi_v2i8_1(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i8> poison, i8 -1, i32 0
define <4 x i8> @vxor_vv_v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i8> @llvm.vp.xor.v4i8(<4 x i8> %va, <4 x i8> %b, <4 x i1> %m, i32 %evl)
define <4 x i8> @vxor_vx_v4i8(<4 x i8> %va, i8 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 %b, i32 0
define <4 x i8> @vxor_vi_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 7, i32 0
define <4 x i8> @vxor_vi_v4i8_1(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i8> poison, i8 -1, i32 0
define <8 x i8> @vxor_vv_v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i8> @llvm.vp.xor.v8i8(<8 x i8> %va, <8 x i8> %b, <8 x i1> %m, i32 %evl)
define <8 x i8> @vxor_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 %b, i32 0
define <8 x i8> @vxor_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 7, i32 0
define <8 x i8> @vxor_vi_v8i8_1(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i8> poison, i8 -1, i32 0
define <9 x i8> @vxor_vv_v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v9i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <9 x i8> @llvm.vp.xor.v9i8(<9 x i8> %va, <9 x i8> %b, <9 x i1> %m, i32 %evl)
define <9 x i8> @vxor_vx_v9i8(<9 x i8> %va, i8 %b, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v9i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <9 x i8> poison, i8 %b, i32 0
define <9 x i8> @vxor_vi_v9i8(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <9 x i8> poison, i8 7, i32 0
define <9 x i8> @vxor_vi_v9i8_1(<9 x i8> %va, <9 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v9i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <9 x i8> poison, i8 -1, i32 0
define <16 x i8> @vxor_vv_v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <16 x i8> @llvm.vp.xor.v16i8(<16 x i8> %va, <16 x i8> %b, <16 x i1> %m, i32 %evl)
define <16 x i8> @vxor_vx_v16i8(<16 x i8> %va, i8 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 %b, i32 0
define <16 x i8> @vxor_vi_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 7, i32 0
define <16 x i8> @vxor_vi_v16i8_1(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i8> poison, i8 -1, i32 0
define <2 x i16> @vxor_vv_v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i16> @llvm.vp.xor.v2i16(<2 x i16> %va, <2 x i16> %b, <2 x i1> %m, i32 %evl)
define <2 x i16> @vxor_vx_v2i16(<2 x i16> %va, i16 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 %b, i32 0
define <2 x i16> @vxor_vi_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 7, i32 0
define <2 x i16> @vxor_vi_v2i16_1(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i16> poison, i16 -1, i32 0
define <4 x i16> @vxor_vv_v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i16> @llvm.vp.xor.v4i16(<4 x i16> %va, <4 x i16> %b, <4 x i1> %m, i32 %evl)
define <4 x i16> @vxor_vx_v4i16(<4 x i16> %va, i16 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 %b, i32 0
define <4 x i16> @vxor_vi_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 7, i32 0
define <4 x i16> @vxor_vi_v4i16_1(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i16> poison, i16 -1, i32 0
define <8 x i16> @vxor_vv_v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <8 x i16> @llvm.vp.xor.v8i16(<8 x i16> %va, <8 x i16> %b, <8 x i1> %m, i32 %evl)
define <8 x i16> @vxor_vx_v8i16(<8 x i16> %va, i16 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 %b, i32 0
define <8 x i16> @vxor_vi_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 7, i32 0
define <8 x i16> @vxor_vi_v8i16_1(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i16> poison, i16 -1, i32 0
define <16 x i16> @vxor_vv_v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <16 x i16> @llvm.vp.xor.v16i16(<16 x i16> %va, <16 x i16> %b, <16 x i1> %m, i32 %evl)
define <16 x i16> @vxor_vx_v16i16(<16 x i16> %va, i16 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 %b, i32 0
define <16 x i16> @vxor_vi_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 7, i32 0
define <16 x i16> @vxor_vi_v16i16_1(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i16> poison, i16 -1, i32 0
define <2 x i32> @vxor_vv_v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i32> @llvm.vp.xor.v2i32(<2 x i32> %va, <2 x i32> %b, <2 x i1> %m, i32 %evl)
define <2 x i32> @vxor_vx_v2i32(<2 x i32> %va, i32 %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 %b, i32 0
define <2 x i32> @vxor_vi_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 7, i32 0
define <2 x i32> @vxor_vi_v2i32_1(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i32> poison, i32 -1, i32 0
define <4 x i32> @vxor_vv_v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <4 x i32> @llvm.vp.xor.v4i32(<4 x i32> %va, <4 x i32> %b, <4 x i1> %m, i32 %evl)
define <4 x i32> @vxor_vx_v4i32(<4 x i32> %va, i32 %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 %b, i32 0
define <4 x i32> @vxor_vi_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 7, i32 0
define <4 x i32> @vxor_vi_v4i32_1(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i32> poison, i32 -1, i32 0
define <8 x i32> @vxor_vv_v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <8 x i32> @llvm.vp.xor.v8i32(<8 x i32> %va, <8 x i32> %b, <8 x i1> %m, i32 %evl)
define <8 x i32> @vxor_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 %b, i32 0
define <8 x i32> @vxor_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 7, i32 0
define <8 x i32> @vxor_vi_v8i32_1(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i32> poison, i32 -1, i32 0
define <16 x i32> @vxor_vv_v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <16 x i32> @llvm.vp.xor.v16i32(<16 x i32> %va, <16 x i32> %b, <16 x i1> %m, i32 %evl)
define <16 x i32> @vxor_vx_v16i32(<16 x i32> %va, i32 %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 %b, i32 0
define <16 x i32> @vxor_vi_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 7, i32 0
define <16 x i32> @vxor_vi_v16i32_1(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i32> poison, i32 -1, i32 0
define <2 x i64> @vxor_vv_v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <2 x i64> @llvm.vp.xor.v2i64(<2 x i64> %va, <2 x i64> %b, <2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_v2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 %b, i32 0
define <2 x i64> @vxor_vi_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 7, i32 0
define <2 x i64> @vxor_vi_v2i64_1(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v2i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <2 x i64> poison, i64 -1, i32 0
define <4 x i64> @vxor_vv_v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <4 x i64> @llvm.vp.xor.v4i64(<4 x i64> %va, <4 x i64> %b, <4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 4, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_v4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 %b, i32 0
define <4 x i64> @vxor_vi_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 7, i32 0
define <4 x i64> @vxor_vi_v4i64_1(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v4i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <4 x i64> poison, i64 -1, i32 0
define <8 x i64> @vxor_vv_v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <8 x i64> @llvm.vp.xor.v8i64(<8 x i64> %va, <8 x i64> %b, <8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_v8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 %b, i32 0
define <8 x i64> @vxor_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 7, i32 0
define <8 x i64> @vxor_vi_v8i64_1(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <8 x i64> poison, i64 -1, i32 0
define <16 x i64> @vxor_vv_v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <16 x i64> @llvm.vp.xor.v16i64(<16 x i64> %va, <16 x i64> %b, <16 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetivli zero, 16, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_v16i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 %b, i32 0
define <16 x i64> @vxor_vi_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 7, i32 0
define <16 x i64> @vxor_vi_v16i64_1(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_v16i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <16 x i64> poison, i64 -1, i32 0
; CHECK-NEXT: srli a1, a0, 3
; CHECK-NEXT: srli a0, a0, 2
; CHECK-NEXT: add a1, a0, a1
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, ma
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vslideup.vx v22, v8, a0
; CHECK-NEXT: vmv8r.v v8, v16
; CHECK-NEXT: ret
define <vscale x 1 x half> @masked_load_nxv1f16(<vscale x 1 x half>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x half> @llvm.masked.load.nxv1f16(<vscale x 1 x half>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x half> undef)
define <vscale x 1 x float> @masked_load_nxv1f32(<vscale x 1 x float>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x float> @llvm.masked.load.nxv1f32(<vscale x 1 x float>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x float> undef)
define <vscale x 1 x double> @masked_load_nxv1f64(<vscale x 1 x double>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x double> @llvm.masked.load.nxv1f64(<vscale x 1 x double>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x double> undef)
define <vscale x 2 x half> @masked_load_nxv2f16(<vscale x 2 x half>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x half> @llvm.masked.load.nxv2f16(<vscale x 2 x half>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x half> undef)
define <vscale x 2 x float> @masked_load_nxv2f32(<vscale x 2 x float>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x float> @llvm.masked.load.nxv2f32(<vscale x 2 x float>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x float> undef)
define <vscale x 2 x double> @masked_load_nxv2f64(<vscale x 2 x double>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x double> @llvm.masked.load.nxv2f64(<vscale x 2 x double>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x double> undef)
define <vscale x 4 x half> @masked_load_nxv4f16(<vscale x 4 x half>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x half> @llvm.masked.load.nxv4f16(<vscale x 4 x half>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x half> undef)
define <vscale x 4 x float> @masked_load_nxv4f32(<vscale x 4 x float>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x float> @llvm.masked.load.nxv4f32(<vscale x 4 x float>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x float> undef)
define <vscale x 4 x double> @masked_load_nxv4f64(<vscale x 4 x double>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x double> @llvm.masked.load.nxv4f64(<vscale x 4 x double>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x double> undef)
define <vscale x 8 x half> @masked_load_nxv8f16(<vscale x 8 x half>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x half> @llvm.masked.load.nxv8f16(<vscale x 8 x half>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x half> undef)
define <vscale x 8 x float> @masked_load_nxv8f32(<vscale x 8 x float>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x float> @llvm.masked.load.nxv8f32(<vscale x 8 x float>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x float> undef)
define <vscale x 8 x double> @masked_load_nxv8f64(<vscale x 8 x double>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x double> @llvm.masked.load.nxv8f64(<vscale x 8 x double>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x double> undef)
define <vscale x 16 x half> @masked_load_nxv16f16(<vscale x 16 x half>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x half> @llvm.masked.load.nxv16f16(<vscale x 16 x half>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x half> undef)
define <vscale x 16 x float> @masked_load_nxv16f32(<vscale x 16 x float>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x float> @llvm.masked.load.nxv16f32(<vscale x 16 x float>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x float> undef)
define <vscale x 32 x half> @masked_load_nxv32f16(<vscale x 32 x half>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x half> @llvm.masked.load.nxv32f16(<vscale x 32 x half>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x half> undef)
define <vscale x 1 x i8> @masked_load_nxv1i8(<vscale x 1 x i8>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.masked.load.nxv1i8(<vscale x 1 x i8>* %a, i32 1, <vscale x 1 x i1> %mask, <vscale x 1 x i8> undef)
define <vscale x 1 x i16> @masked_load_nxv1i16(<vscale x 1 x i16>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.masked.load.nxv1i16(<vscale x 1 x i16>* %a, i32 2, <vscale x 1 x i1> %mask, <vscale x 1 x i16> undef)
define <vscale x 1 x i32> @masked_load_nxv1i32(<vscale x 1 x i32>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.masked.load.nxv1i32(<vscale x 1 x i32>* %a, i32 4, <vscale x 1 x i1> %mask, <vscale x 1 x i32> undef)
define <vscale x 1 x i64> @masked_load_nxv1i64(<vscale x 1 x i64>* %a, <vscale x 1 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.masked.load.nxv1i64(<vscale x 1 x i64>* %a, i32 8, <vscale x 1 x i1> %mask, <vscale x 1 x i64> undef)
define <vscale x 2 x i8> @masked_load_nxv2i8(<vscale x 2 x i8>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.masked.load.nxv2i8(<vscale x 2 x i8>* %a, i32 1, <vscale x 2 x i1> %mask, <vscale x 2 x i8> undef)
define <vscale x 2 x i16> @masked_load_nxv2i16(<vscale x 2 x i16>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.masked.load.nxv2i16(<vscale x 2 x i16>* %a, i32 2, <vscale x 2 x i1> %mask, <vscale x 2 x i16> undef)
define <vscale x 2 x i32> @masked_load_nxv2i32(<vscale x 2 x i32>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.masked.load.nxv2i32(<vscale x 2 x i32>* %a, i32 4, <vscale x 2 x i1> %mask, <vscale x 2 x i32> undef)
define <vscale x 2 x i64> @masked_load_nxv2i64(<vscale x 2 x i64>* %a, <vscale x 2 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.masked.load.nxv2i64(<vscale x 2 x i64>* %a, i32 8, <vscale x 2 x i1> %mask, <vscale x 2 x i64> undef)
define <vscale x 4 x i8> @masked_load_nxv4i8(<vscale x 4 x i8>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.masked.load.nxv4i8(<vscale x 4 x i8>* %a, i32 1, <vscale x 4 x i1> %mask, <vscale x 4 x i8> undef)
define <vscale x 4 x i16> @masked_load_nxv4i16(<vscale x 4 x i16>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.masked.load.nxv4i16(<vscale x 4 x i16>* %a, i32 2, <vscale x 4 x i1> %mask, <vscale x 4 x i16> undef)
define <vscale x 4 x i32> @masked_load_nxv4i32(<vscale x 4 x i32>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.masked.load.nxv4i32(<vscale x 4 x i32>* %a, i32 4, <vscale x 4 x i1> %mask, <vscale x 4 x i32> undef)
define <vscale x 4 x i64> @masked_load_nxv4i64(<vscale x 4 x i64>* %a, <vscale x 4 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.masked.load.nxv4i64(<vscale x 4 x i64>* %a, i32 8, <vscale x 4 x i1> %mask, <vscale x 4 x i64> undef)
define <vscale x 8 x i8> @masked_load_nxv8i8(<vscale x 8 x i8>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.masked.load.nxv8i8(<vscale x 8 x i8>* %a, i32 1, <vscale x 8 x i1> %mask, <vscale x 8 x i8> undef)
define <vscale x 8 x i16> @masked_load_nxv8i16(<vscale x 8 x i16>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.masked.load.nxv8i16(<vscale x 8 x i16>* %a, i32 2, <vscale x 8 x i1> %mask, <vscale x 8 x i16> undef)
define <vscale x 8 x i32> @masked_load_nxv8i32(<vscale x 8 x i32>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.masked.load.nxv8i32(<vscale x 8 x i32>* %a, i32 4, <vscale x 8 x i1> %mask, <vscale x 8 x i32> undef)
define <vscale x 8 x i64> @masked_load_nxv8i64(<vscale x 8 x i64>* %a, <vscale x 8 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.masked.load.nxv8i64(<vscale x 8 x i64>* %a, i32 8, <vscale x 8 x i1> %mask, <vscale x 8 x i64> undef)
define <vscale x 16 x i8> @masked_load_nxv16i8(<vscale x 16 x i8>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i8> @llvm.masked.load.nxv16i8(<vscale x 16 x i8>* %a, i32 1, <vscale x 16 x i1> %mask, <vscale x 16 x i8> undef)
define <vscale x 16 x i16> @masked_load_nxv16i16(<vscale x 16 x i16>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i16> @llvm.masked.load.nxv16i16(<vscale x 16 x i16>* %a, i32 2, <vscale x 16 x i1> %mask, <vscale x 16 x i16> undef)
define <vscale x 16 x i32> @masked_load_nxv16i32(<vscale x 16 x i32>* %a, <vscale x 16 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e32, m8, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 16 x i32> @llvm.masked.load.nxv16i32(<vscale x 16 x i32>* %a, i32 4, <vscale x 16 x i1> %mask, <vscale x 16 x i32> undef)
define <vscale x 32 x i8> @masked_load_nxv32i8(<vscale x 32 x i8>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x i8> @llvm.masked.load.nxv32i8(<vscale x 32 x i8>* %a, i32 1, <vscale x 32 x i1> %mask, <vscale x 32 x i8> undef)
define <vscale x 32 x i16> @masked_load_nxv32i16(<vscale x 32 x i16>* %a, <vscale x 32 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e16, m8, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 32 x i16> @llvm.masked.load.nxv32i16(<vscale x 32 x i16>* %a, i32 2, <vscale x 32 x i1> %mask, <vscale x 32 x i16> undef)
define <vscale x 64 x i8> @masked_load_nxv64i8(<vscale x 64 x i8>* %a, <vscale x 64 x i1> %mask) nounwind {
; CHECK-LABEL: masked_load_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli a1, zero, e8, m8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 64 x i8> @llvm.masked.load.nxv64i8(<vscale x 64 x i8>* %a, i32 1, <vscale x 64 x i1> %mask, <vscale x 64 x i8> undef)
; CHECK-NEXT: .LBB46_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB47_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB48_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-NEXT: .LBB49_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB50_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB51_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB52_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB53_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB54_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB55_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB56_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB57_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB58_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB59_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB60_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB61_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB62_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB63_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v8, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a1, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v8, (a0)
; CHECK-NEXT: .LBB64_1: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vle32.v v9, (a0)
-; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-NEXT: vrem.vv v9, v8, v9, v0.t
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vse32.v v9, (a0)
; CHECK-LABEL: strided_load_i8:
; CHECK: # %bb.0:
; CHECK-NEXT: li a2, 32
-; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 %stride, <32 x i1> %m)
define <2 x i64> @strided_load_i64(ptr %p, i64 %stride, <2 x i1> %m) {
; CHECK-LABEL: strided_load_i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, mu
+; CHECK-NEXT: vsetivli zero, 2, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <2 x i64> @llvm.riscv.masked.strided.load.v2i64.p0.i64(<2 x i64> undef, ptr %p, i64 %stride, <2 x i1> %m)
; CHECK-LABEL: strided_load_i8_splat:
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), zero, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 0, <32 x i1> %m)
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, -1
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 -1, <32 x i1> %m)
; CHECK: # %bb.0:
; CHECK-NEXT: li a1, 32
; CHECK-NEXT: li a2, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vlse8.v v8, (a0), a2, v0.t
; CHECK-NEXT: ret
%res = call <32 x i8> @llvm.riscv.masked.strided.load.v32i8.p0.i64(<32 x i8> undef, ptr %p, i64 1, <32 x i1> %m)
define <vscale x 1 x i64> @strided_load_vscale_i64(ptr %p, i64 %stride, <vscale x 1 x i1> %m) {
; CHECK-LABEL: strided_load_vscale_i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli a2, zero, e64, m1, ta, ma
; CHECK-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-NEXT: ret
%res = call <vscale x 1 x i64> @llvm.riscv.masked.strided.load.nxv1i64.p0.i64(<vscale x 1 x i64> undef, ptr %p, i64 %stride, <vscale x 1 x i1> %m)
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i8(i8* %ptr, i8 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i8(i8* %ptr, i8 %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i16(i8* %ptr, i16 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i16(i8* %ptr, i16 %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @strided_vpload_nxv1i8_i64(i8* %ptr, i64 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a3, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8_i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i64(i8* %ptr, i64 %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @strided_vpload_nxv1i8(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf8, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.experimental.vp.strided.load.nxv1i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @strided_vpload_nxv2i8(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf4, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.experimental.vp.strided.load.nxv2i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @strided_vpload_nxv4i8(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, mf2, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.experimental.vp.strided.load.nxv4i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @strided_vpload_nxv8i8(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i8:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV32-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i8:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; CHECK-RV64-NEXT: vlse8.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.experimental.vp.strided.load.nxv8i8.p0i8.i32(i8* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @strided_vpload_nxv1i16(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.experimental.vp.strided.load.nxv1i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @strided_vpload_nxv2i16(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.experimental.vp.strided.load.nxv2i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @strided_vpload_nxv4i16(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.experimental.vp.strided.load.nxv4i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @strided_vpload_nxv8i16(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.experimental.vp.strided.load.nxv8i16.p0i16.i32(i16* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @strided_vpload_nxv1i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.experimental.vp.strided.load.nxv1i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @strided_vpload_nxv2i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.experimental.vp.strided.load.nxv2i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @strided_vpload_nxv4i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.experimental.vp.strided.load.nxv4i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @strided_vpload_nxv8i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.experimental.vp.strided.load.nxv8i32.p0i32.i32(i32* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i64> @strided_vpload_nxv1i64(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.experimental.vp.strided.load.nxv1i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @strided_vpload_nxv2i64(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.experimental.vp.strided.load.nxv2i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @strided_vpload_nxv4i64(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.experimental.vp.strided.load.nxv4i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @strided_vpload_nxv8i64(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8i64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8i64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.experimental.vp.strided.load.nxv8i64.p0i64.i32(i64* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x half> @strided_vpload_nxv1f16(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf4, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x half> @llvm.experimental.vp.strided.load.nxv1f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x half> @strided_vpload_nxv2f16(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, mf2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x half> @llvm.experimental.vp.strided.load.nxv2f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x half> @strided_vpload_nxv4f16(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m1, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x half> @llvm.experimental.vp.strided.load.nxv4f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x half> @strided_vpload_nxv8f16(half* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f16:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV32-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f16:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e16, m2, ta, ma
; CHECK-RV64-NEXT: vlse16.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x half> @llvm.experimental.vp.strided.load.nxv8f16.p0f16.i32(half* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x float> @strided_vpload_nxv1f32(float* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, mf2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x float> @llvm.experimental.vp.strided.load.nxv1f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x float> @strided_vpload_nxv2f32(float* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m1, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x float> @llvm.experimental.vp.strided.load.nxv2f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x float> @strided_vpload_nxv4f32(float* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m2, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x float> @llvm.experimental.vp.strided.load.nxv4f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x float> @strided_vpload_nxv8f32(float* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f32:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV32-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f32:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e32, m4, ta, ma
; CHECK-RV64-NEXT: vlse32.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x float> @llvm.experimental.vp.strided.load.nxv8f32.p0f32.i32(float* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x double> @strided_vpload_nxv1f64(double* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv1f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv1f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 1 x double> @llvm.experimental.vp.strided.load.nxv1f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @strided_vpload_nxv2f64(double* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv2f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv2f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 2 x double> @llvm.experimental.vp.strided.load.nxv2f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @strided_vpload_nxv4f64(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv4f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv4f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 4 x double> @llvm.experimental.vp.strided.load.nxv4f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x double> @strided_vpload_nxv8f64(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv8f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv8f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%load = call <vscale x 8 x double> @llvm.experimental.vp.strided.load.nxv8f64.p0f64.i32(double* %ptr, i32 signext %stride, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 3 x double> @strided_vpload_nxv3f64(double* %ptr, i32 signext %stride, <vscale x 3 x i1> %mask, i32 zeroext %evl) {
; CHECK-RV32-LABEL: strided_vpload_nxv3f64:
; CHECK-RV32: # %bb.0:
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
;
; CHECK-RV64-LABEL: strided_vpload_nxv3f64:
; CHECK-RV64: # %bb.0:
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
%v = call <vscale x 3 x double> @llvm.experimental.vp.strided.load.nxv3f64.p0f64.i32(double* %ptr, i32 %stride, <vscale x 3 x i1> %mask, i32 %evl)
; CHECK-RV32-NEXT: .LBB42_4:
; CHECK-RV32-NEXT: mul a4, a3, a1
; CHECK-RV32-NEXT: add a4, a0, a4
-; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: ret
; CHECK-RV64-NEXT: .LBB42_4:
; CHECK-RV64-NEXT: mul a4, a2, a1
; CHECK-RV64-NEXT: add a4, a0, a4
-; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (a4), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: ret
; CHECK-RV32-NEXT: .LBB44_6:
; CHECK-RV32-NEXT: mul t1, a6, a1
; CHECK-RV32-NEXT: add t1, a0, t1
-; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV32-NEXT: li t0, 0
; CHECK-RV32-NEXT: sub t1, a3, a7
; CHECK-RV32-NEXT: .LBB44_10:
; CHECK-RV32-NEXT: mul a2, a5, a1
; CHECK-RV32-NEXT: add a2, a0, a2
-; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV32-NEXT: vlse64.v v24, (a2), a1, v0.t
-; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, mu
+; CHECK-RV32-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-RV32-NEXT: vmv1r.v v0, v8
; CHECK-RV32-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV32-NEXT: vs1r.v v24, (a4)
; CHECK-RV64-NEXT: .LBB44_6:
; CHECK-RV64-NEXT: mul t1, a6, a1
; CHECK-RV64-NEXT: add t1, a0, t1
-; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v16, (t1), a1, v0.t
; CHECK-RV64-NEXT: li t0, 0
; CHECK-RV64-NEXT: sub t1, a2, a7
; CHECK-RV64-NEXT: .LBB44_10:
; CHECK-RV64-NEXT: mul a2, a5, a1
; CHECK-RV64-NEXT: add a2, a0, a2
-; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, t0, e64, m8, ta, ma
; CHECK-RV64-NEXT: vlse64.v v24, (a2), a1, v0.t
-; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, mu
+; CHECK-RV64-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-RV64-NEXT: vmv1r.v v0, v8
; CHECK-RV64-NEXT: vlse64.v v8, (a0), a1, v0.t
; CHECK-RV64-NEXT: vs1r.v v24, (a3)
define <vscale x 8 x i7> @vadd_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vadd_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.add.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vadd_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vadd_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vadd_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
define <vscale x 2 x i8> @vadd_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.add.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vadd_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vadd_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
define <vscale x 3 x i8> @vadd_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.add.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vadd_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vadd_vi_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 -1, i32 0
define <vscale x 4 x i8> @vadd_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.add.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vadd_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vadd_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
define <vscale x 8 x i8> @vadd_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.add.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vadd_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vadd_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
define <vscale x 16 x i8> @vadd_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.add.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vadd_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vadd_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
define <vscale x 32 x i8> @vadd_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.add.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vadd_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vadd_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
define <vscale x 64 x i8> @vadd_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.add.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vadd_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vadd_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
; CHECK-NEXT: li a4, 0
; CHECK-NEXT: vsetvli a5, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a0)
-; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e8, m8, ta, ma
; CHECK-NEXT: sub a0, a1, a2
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: bltu a1, a0, .LBB50_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a4, a0
; CHECK-NEXT: .LBB50_4:
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i16> @vadd_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.add.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vadd_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vadd_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
define <vscale x 2 x i16> @vadd_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.add.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vadd_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vadd_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
define <vscale x 4 x i16> @vadd_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.add.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vadd_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vadd_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
define <vscale x 8 x i16> @vadd_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.add.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vadd_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vadd_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
define <vscale x 16 x i16> @vadd_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.add.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vadd_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vadd_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
define <vscale x 32 x i16> @vadd_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.add.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vadd_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vadd_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
define <vscale x 1 x i32> @vadd_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.add.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vadd_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vadd_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
define <vscale x 2 x i32> @vadd_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.add.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vadd_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vadd_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
define <vscale x 4 x i32> @vadd_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.add.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vadd_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vadd_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
define <vscale x 8 x i32> @vadd_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.add.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vadd_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vadd_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
define <vscale x 16 x i32> @vadd_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.add.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vadd_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vadd.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vadd_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB118_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB118_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB118_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a2, a3
; CHECK-NEXT: .LBB120_2:
-; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a2, e32, m8, ta, ma
; CHECK-NEXT: vadd.vi v16, v16, -1, v0.t
; CHECK-NEXT: bltu a0, a1, .LBB120_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a0, a1
; CHECK-NEXT: .LBB120_4:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
; RV32: # %bb.0:
; RV32-NEXT: csrr a0, vlenb
; RV32-NEXT: slli a0, a0, 1
-; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV32-NEXT: vadd.vi v8, v8, -1, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e8, mf2, ta, ma
; RV64-NEXT: vslidedown.vx v24, v0, a1
; RV64-NEXT: slli a0, a0, 1
-; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; RV64-NEXT: vadd.vi v8, v8, -1, v0.t
-; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; RV64-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vadd.vi v16, v16, -1, v0.t
; RV64-NEXT: ret
define <vscale x 1 x i64> @vadd_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.add.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vadd_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
define <vscale x 2 x i64> @vadd_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.add.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vadd_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
define <vscale x 4 x i64> @vadd_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.add.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vadd_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
define <vscale x 8 x i64> @vadd_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.add.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vadd_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vadd.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vadd_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vadd_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vadd.vi v8, v8, -1, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0
define <vscale x 8 x i7> @vand_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vand_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.and.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vand_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vand_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
define <vscale x 2 x i8> @vand_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.and.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vand_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vand_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
define <vscale x 4 x i8> @vand_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.and.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vand_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vand_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
define <vscale x 8 x i8> @vand_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.and.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vand_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vand_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
define <vscale x 16 x i8> @vand_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.and.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vand_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vand_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
define <vscale x 32 x i8> @vand_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.and.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vand_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vand_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
define <vscale x 64 x i8> @vand_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.and.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vand_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vand_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
define <vscale x 1 x i16> @vand_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.and.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vand_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vand_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
define <vscale x 2 x i16> @vand_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.and.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vand_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vand_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
define <vscale x 4 x i16> @vand_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.and.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vand_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vand_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
define <vscale x 8 x i16> @vand_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.and.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vand_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vand_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
define <vscale x 14 x i16> @vand_vv_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 14 x i16> @llvm.vp.and.nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i16> %b, <vscale x 14 x i1> %m, i32 %evl)
define <vscale x 14 x i16> @vand_vx_nxv14i16(<vscale x 14 x i16> %va, i16 %b, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 14 x i16> poison, i16 %b, i32 0
define <vscale x 14 x i16> @vand_vi_nxv14i16(<vscale x 14 x i16> %va, <vscale x 14 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv14i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 14 x i16> poison, i16 4, i32 0
define <vscale x 16 x i16> @vand_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.and.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vand_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vand_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
define <vscale x 32 x i16> @vand_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.and.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vand_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vand_vx_nxv32i16_commute(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv32i16_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vand_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
define <vscale x 1 x i32> @vand_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.and.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vand_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vand_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
define <vscale x 2 x i32> @vand_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.and.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vand_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vand_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
define <vscale x 4 x i32> @vand_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.and.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vand_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vand_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
define <vscale x 8 x i32> @vand_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.and.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vand_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vand_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
define <vscale x 16 x i32> @vand_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.and.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vand_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vand.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vand_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
define <vscale x 1 x i64> @vand_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.and.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vand.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vand_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
define <vscale x 2 x i64> @vand_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.and.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vand.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vand_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
define <vscale x 4 x i64> @vand_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.and.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vand.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vand_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
define <vscale x 8 x i64> @vand_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.and.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vand.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vand_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vand.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vand_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vand_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
define <vscale x 1 x half> @vfsgnj_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.copysign.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfsgnj_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.copysign.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfsgnj_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.copysign.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfsgnj_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.copysign.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfsgnj_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.copysign.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfsgnj_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.copysign.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfsgnj_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.copysign.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfsgnj_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.copysign.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfsgnj_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.copysign.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfsgnj_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.copysign.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfsgnj_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.copysign.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfsgnj_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.copysign.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfsgnj_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.copysign.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfsgnj_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.copysign.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfsgnj_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsgnj_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsgnj.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.copysign.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vadd.vv v9, v9, v9
; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vdiv_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.sdiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vdiv_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vdiv_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.sdiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vdiv_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vdiv_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.sdiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vdiv_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.sdiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vdiv_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vdiv_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.sdiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vdiv_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vdiv_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.sdiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vdiv_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vdiv_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.sdiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vdiv_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vdiv_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.sdiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vdiv_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vdiv_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.sdiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vdiv_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vdiv_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.sdiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vdiv_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vdiv_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.sdiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vdiv_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vdiv_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.sdiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vdiv_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vdiv_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.sdiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vdiv_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vdiv_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.sdiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vdiv_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vdiv_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.sdiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vdiv_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vdiv_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.sdiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vdiv_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vdiv_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.sdiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vdiv_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vdiv_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.sdiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vdiv_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vdiv_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.sdiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vdiv_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vdiv.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vdiv_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.sdiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vdiv_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sdiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vdiv_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.sdiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vdiv_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdiv_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.sdiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vdiv.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdiv_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vdiv.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vand.vx v8, v8, a2
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vdivu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.udiv.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vdivu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vdivu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.udiv.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vdivu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vdivu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.udiv.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vdivu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.udiv.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vdivu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vdivu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.udiv.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vdivu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vdivu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.udiv.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vdivu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vdivu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.udiv.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vdivu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vdivu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.udiv.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vdivu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vdivu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.udiv.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vdivu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vdivu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.udiv.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vdivu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vdivu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.udiv.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vdivu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vdivu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.udiv.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vdivu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vdivu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.udiv.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vdivu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vdivu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.udiv.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vdivu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vdivu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.udiv.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vdivu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vdivu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.udiv.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vdivu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vdivu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.udiv.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vdivu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vdivu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.udiv.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vdivu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vdivu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.udiv.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vdivu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vdivu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vdivu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.udiv.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vdivu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.udiv.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vdivu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.udiv.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vdivu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vdivu_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vdivu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.udiv.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vdivu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vdivu_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vdivu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 1 x half> @vfadd_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fadd.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfadd_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 1 x half> @vfadd_vf_nxv1f16_commute(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv1f16_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfadd_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fadd.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfadd_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfadd_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fadd.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfadd_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfadd_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fadd.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfadd_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfadd_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fadd.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfadd_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfadd_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fadd.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfadd_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfadd_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.fadd.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfadd_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfadd_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fadd.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfadd_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfadd_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.fadd.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfadd_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfadd_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.fadd.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfadd_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfadd_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.fadd.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfadd_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfadd_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.fadd.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfadd_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfadd_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fadd.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfadd_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfadd_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.fadd.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfadd_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 7 x double> @vfadd_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.fadd.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfadd_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.fadd.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfadd_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfadd_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfadd.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
define <vscale x 1 x half> @vfdiv_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fdiv.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfdiv_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fdiv.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfdiv_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fdiv.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfdiv_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fdiv.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfdiv_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fdiv.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfdiv_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fdiv.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfdiv_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.fdiv.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfdiv_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fdiv.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfdiv_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.fdiv.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfdiv_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.fdiv.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfdiv_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.fdiv.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfdiv_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.fdiv.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfdiv_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fdiv.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfdiv_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.fdiv.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 7 x double> @vfdiv_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.fdiv.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfdiv_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.fdiv.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfdiv_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
define <vscale x 1 x half> @vfmax_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.maxnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfmax_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.maxnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfmax_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.maxnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfmax_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.maxnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfmax_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.maxnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfmax_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.maxnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfmax_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.maxnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfmax_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.maxnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfmax_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.maxnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfmax_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.maxnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfmax_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.maxnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfmax_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.maxnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfmax_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.maxnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfmax_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.maxnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfmax_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmax_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.maxnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfmin_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.minnum.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfmin_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.minnum.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfmin_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.minnum.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfmin_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.minnum.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfmin_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.minnum.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfmin_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.minnum.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %vb, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfmin_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.minnum.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfmin_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.minnum.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfmin_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.minnum.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfmin_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.minnum.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfmin_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.minnum.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %vb, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfmin_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.minnum.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %vb, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfmin_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.minnum.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %vb, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfmin_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.minnum.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %vb, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfmin_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmin_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.minnum.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %vb, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfmul_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fmul.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfmul_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfmul_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fmul.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfmul_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfmul_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fmul.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfmul_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfmul_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fmul.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfmul_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfmul_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fmul.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfmul_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfmul_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fmul.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfmul_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfmul_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.fmul.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfmul_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfmul_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fmul.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfmul_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfmul_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.fmul.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfmul_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfmul_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.fmul.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfmul_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfmul_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.fmul.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfmul_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfmul_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.fmul.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfmul_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfmul_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fmul.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfmul_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfmul_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.fmul.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfmul_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 7 x double> @vfmul_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.fmul.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfmul_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.fmul.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfmul_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfmul_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfmul.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
define <vscale x 1 x half> @vfrdiv_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfrdiv_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfrdiv_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfrdiv_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfrdiv_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfrdiv_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfrdiv_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfrdiv_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfrdiv_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfrdiv_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfrdiv_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfrdiv_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfrdiv_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfrdiv_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 8 x double> @vfrdiv_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrdiv_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
define <vscale x 1 x half> @vfrsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfrsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfrsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfrsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfrsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfrsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfrsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfrsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfrsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfrsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfrsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfrsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfrsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfrsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 8 x double> @vfrsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfrsub_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfrsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
define <vscale x 1 x half> @vfsub_vv_nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x half> @llvm.vp.fsub.nxv1f16(<vscale x 1 x half> %va, <vscale x 1 x half> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vfsub_vf_nxv1f16(<vscale x 1 x half> %va, half %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x half> poison, half %b, i32 0
define <vscale x 2 x half> @vfsub_vv_nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x half> @llvm.vp.fsub.nxv2f16(<vscale x 2 x half> %va, <vscale x 2 x half> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vfsub_vf_nxv2f16(<vscale x 2 x half> %va, half %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x half> poison, half %b, i32 0
define <vscale x 4 x half> @vfsub_vv_nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x half> @llvm.vp.fsub.nxv4f16(<vscale x 4 x half> %va, <vscale x 4 x half> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vfsub_vf_nxv4f16(<vscale x 4 x half> %va, half %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x half> poison, half %b, i32 0
define <vscale x 8 x half> @vfsub_vv_nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x half> @llvm.vp.fsub.nxv8f16(<vscale x 8 x half> %va, <vscale x 8 x half> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vfsub_vf_nxv8f16(<vscale x 8 x half> %va, half %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x half> poison, half %b, i32 0
define <vscale x 16 x half> @vfsub_vv_nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x half> @llvm.vp.fsub.nxv16f16(<vscale x 16 x half> %va, <vscale x 16 x half> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x half> @vfsub_vf_nxv16f16(<vscale x 16 x half> %va, half %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv16f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x half> poison, half %b, i32 0
define <vscale x 32 x half> @vfsub_vv_nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x half> @llvm.vp.fsub.nxv32f16(<vscale x 32 x half> %va, <vscale x 32 x half> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x half> @vfsub_vf_nxv32f16(<vscale x 32 x half> %va, half %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv32f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x half> poison, half %b, i32 0
define <vscale x 1 x float> @vfsub_vv_nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x float> @llvm.vp.fsub.nxv1f32(<vscale x 1 x float> %va, <vscale x 1 x float> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vfsub_vf_nxv1f32(<vscale x 1 x float> %va, float %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x float> poison, float %b, i32 0
define <vscale x 2 x float> @vfsub_vv_nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x float> @llvm.vp.fsub.nxv2f32(<vscale x 2 x float> %va, <vscale x 2 x float> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vfsub_vf_nxv2f32(<vscale x 2 x float> %va, float %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x float> poison, float %b, i32 0
define <vscale x 4 x float> @vfsub_vv_nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x float> @llvm.vp.fsub.nxv4f32(<vscale x 4 x float> %va, <vscale x 4 x float> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vfsub_vf_nxv4f32(<vscale x 4 x float> %va, float %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x float> poison, float %b, i32 0
define <vscale x 8 x float> @vfsub_vv_nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x float> @llvm.vp.fsub.nxv8f32(<vscale x 8 x float> %va, <vscale x 8 x float> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vfsub_vf_nxv8f32(<vscale x 8 x float> %va, float %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x float> poison, float %b, i32 0
define <vscale x 16 x float> @vfsub_vv_nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x float> @llvm.vp.fsub.nxv16f32(<vscale x 16 x float> %va, <vscale x 16 x float> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x float> @vfsub_vf_nxv16f32(<vscale x 16 x float> %va, float %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv16f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x float> poison, float %b, i32 0
define <vscale x 1 x double> @vfsub_vv_nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.fsub.nxv1f64(<vscale x 1 x double> %va, <vscale x 1 x double> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vfsub_vf_nxv1f64(<vscale x 1 x double> %va, double %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x double> poison, double %b, i32 0
define <vscale x 2 x double> @vfsub_vv_nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.fsub.nxv2f64(<vscale x 2 x double> %va, <vscale x 2 x double> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vfsub_vf_nxv2f64(<vscale x 2 x double> %va, double %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x double> poison, double %b, i32 0
define <vscale x 4 x double> @vfsub_vv_nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.fsub.nxv4f64(<vscale x 4 x double> %va, <vscale x 4 x double> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vfsub_vf_nxv4f64(<vscale x 4 x double> %va, double %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x double> poison, double %b, i32 0
define <vscale x 7 x double> @vfsub_vv_nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv7f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x double> @llvm.vp.fsub.nxv7f64(<vscale x 7 x double> %va, <vscale x 7 x double> %b, <vscale x 7 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfsub_vv_nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vv_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.fsub.nxv8f64(<vscale x 8 x double> %va, <vscale x 8 x double> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vfsub_vf_nxv8f64(<vscale x 8 x double> %va, double %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vfsub_vf_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vfsub.vf v8, v8, fa0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x double> poison, double %b, i32 0
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vadd.vv v9, v9, v9
; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.smax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vmax_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.smax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vmax_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.smax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vmax_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vmax_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.smax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vmax_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.smax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vmax_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.smax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vmax_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.smax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vmax_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.smax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: li a5, 0
; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: sub a1, a2, a3
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB34_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB34_4:
-; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i16> @vmax_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.smax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vmax_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.smax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vmax_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.smax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vmax_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.smax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vmax_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.smax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vmax_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.smax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vmax_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.smax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vmax_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.smax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vmax_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.smax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vmax_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.smax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vmax_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.smax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB80_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB82_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v0, a2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmax.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmax.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.smax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.smax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.smax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmax_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.smax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmax.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmax_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmax.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vand.vx v8, v8, a2
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vmaxu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.umax.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vmaxu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vmaxu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vmaxu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.umax.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vmaxu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vmaxu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.umax.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vmaxu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vmaxu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.umax.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vmaxu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vmaxu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.umax.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vmaxu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vmaxu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.umax.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vmaxu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vmaxu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.umax.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vmaxu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vmaxu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.umax.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vmaxu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: li a5, 0
; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: sub a1, a2, a3
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB34_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB34_4:
-; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i16> @vmaxu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.umax.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vmaxu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vmaxu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.umax.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vmaxu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vmaxu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.umax.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vmaxu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vmaxu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.umax.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vmaxu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vmaxu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.umax.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vmaxu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vmaxu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.umax.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vmaxu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vmaxu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.umax.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vmaxu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vmaxu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.umax.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vmaxu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vmaxu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.umax.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vmaxu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vmaxu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.umax.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vmaxu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vmaxu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.umax.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vmaxu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB80_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB82_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v0, a2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmaxu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmaxu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i64> @vmaxu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.umax.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vmaxu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.umax.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vmaxu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.umax.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vmaxu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmaxu_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmaxu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.umax.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmaxu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmaxu_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmaxu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vadd.vv v9, v9, v9
; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vmin_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.smin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vmin_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vmin_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vmin_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.smin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vmin_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vmin_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.smin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vmin_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vmin_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.smin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vmin_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vmin_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.smin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vmin_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vmin_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.smin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vmin_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vmin_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.smin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vmin_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vmin_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.smin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vmin_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: li a5, 0
; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: sub a1, a2, a3
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB34_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB34_4:
-; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i16> @vmin_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.smin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vmin_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vmin_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.smin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vmin_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vmin_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.smin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vmin_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vmin_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.smin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vmin_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vmin_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.smin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vmin_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vmin_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.smin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vmin_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vmin_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.smin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vmin_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vmin_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.smin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vmin_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vmin_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.smin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vmin_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vmin_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.smin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vmin_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vmin_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.smin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vmin_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB80_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB82_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v0, a2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmin.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vmin.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i64> @vmin_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.smin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vmin_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.smin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vmin_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.smin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vmin_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmin_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmin.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.smin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmin.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmin_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmin.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vand.vx v8, v8, a2
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vminu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.umin.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vminu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vminu_vx_nxv1i8_commute(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv1i8_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vminu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.umin.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vminu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vminu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.umin.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vminu_vx_nxv3i8(<vscale x 3 x i8> %va, i8 %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 3 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vminu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.umin.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vminu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vminu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.umin.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vminu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vminu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.umin.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vminu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vminu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.umin.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vminu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vminu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.umin.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vminu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
; CHECK-NEXT: li a5, 0
; CHECK-NEXT: vsetvli a6, zero, e8, m8, ta, ma
; CHECK-NEXT: vlm.v v24, (a1)
-; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e8, m8, ta, ma
; CHECK-NEXT: sub a1, a2, a3
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: bltu a2, a1, .LBB34_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a5, a1
; CHECK-NEXT: .LBB34_4:
-; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a5, e8, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i16> @vminu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.umin.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vminu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vminu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.umin.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vminu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vminu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.umin.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vminu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vminu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.umin.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vminu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vminu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.umin.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vminu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vminu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.umin.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vminu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vminu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.umin.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vminu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vminu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.umin.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vminu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vminu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.umin.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vminu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vminu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.umin.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vminu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vminu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.umin.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vminu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB80_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB80_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB80_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: mv a3, a4
; CHECK-NEXT: .LBB82_2:
-; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: bltu a1, a2, .LBB82_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB82_4:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vsetvli a3, zero, e8, mf2, ta, ma
; CHECK-NEXT: vslidedown.vx v24, v0, a2
; CHECK-NEXT: slli a1, a1, 1
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vminu.vx v8, v8, a0, v0.t
-; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, mu
+; CHECK-NEXT: vsetivli zero, 0, e32, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v24
; CHECK-NEXT: vminu.vx v16, v16, a0, v0.t
; CHECK-NEXT: ret
define <vscale x 1 x i64> @vminu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.umin.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vminu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.umin.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vminu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.umin.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vminu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vminu_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vminu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.umin.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vminu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vminu_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vminu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i7> @vmul_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vmul_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.mul.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vmul_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vmul_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.mul.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vmul_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vmul_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.mul.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vmul_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vmul_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.mul.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vmul_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vmul_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.mul.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vmul_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vmul_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.mul.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vmul_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vmul_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.mul.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vmul_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vmul_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.mul.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vmul_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vmul_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.mul.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vmul_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vmul_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.mul.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vmul_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vmul_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.mul.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vmul_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vmul_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.mul.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vmul_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vmul_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.mul.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vmul_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vmul_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.mul.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vmul_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vmul_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.mul.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vmul_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vmul_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.mul.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vmul_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 7 x i32> @vmul_vv_nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv7i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 7 x i32> @llvm.vp.mul.nxv7i32(<vscale x 7 x i32> %va, <vscale x 7 x i32> %b, <vscale x 7 x i1> %m, i32 %evl)
define <vscale x 7 x i32> @vmul_vx_nxv7i32(<vscale x 7 x i32> %va, i32 %b, <vscale x 7 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv7i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 7 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vmul_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.mul.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vmul_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vmul_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.mul.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vmul_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vmul_vx_nxv16i32_commute(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vx_nxv16i32_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vmul.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vmul_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.mul.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vmul_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.mul.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vmul_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.mul.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vmul_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vmul_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vmul.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.mul.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vmul_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i7> @vor_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.or.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vor_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
define <vscale x 2 x i8> @vor_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.or.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vor_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vor_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
define <vscale x 4 x i8> @vor_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.or.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vor_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vor_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
define <vscale x 8 x i8> @vor_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.or.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vor_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vor_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
define <vscale x 16 x i8> @vor_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.or.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vor_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vor_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
define <vscale x 32 x i8> @vor_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.or.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vor_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vor_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
define <vscale x 64 x i8> @vor_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.or.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vor_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vor_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
define <vscale x 1 x i16> @vor_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.or.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vor_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vor_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
define <vscale x 2 x i16> @vor_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.or.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vor_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vor_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
define <vscale x 4 x i16> @vor_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.or.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vor_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vor_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
define <vscale x 8 x i16> @vor_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.or.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vor_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vor_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
define <vscale x 16 x i16> @vor_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.or.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vor_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vor_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
define <vscale x 32 x i16> @vor_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.or.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vor_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
define <vscale x 1 x i32> @vor_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.or.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vor_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vor_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
define <vscale x 2 x i32> @vor_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.or.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vor_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vor_vx_nxv2i32_commute(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv2i32_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vor_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
define <vscale x 4 x i32> @vor_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.or.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vor_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
define <vscale x 8 x i32> @vor_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.or.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vor_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
define <vscale x 10 x i32> @vor_vv_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv10i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 10 x i32> @llvm.vp.or.nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i32> %b, <vscale x 10 x i1> %m, i32 %evl)
define <vscale x 10 x i32> @vor_vx_nxv10i32(<vscale x 10 x i32> %va, i32 %b, <vscale x 10 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv10i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 10 x i32> poison, i32 %b, i32 0
define <vscale x 10 x i32> @vor_vi_nxv10i32(<vscale x 10 x i32> %va, <vscale x 10 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv10i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 10 x i32> poison, i32 5, i32 0
define <vscale x 16 x i32> @vor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.or.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vor_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
define <vscale x 1 x i64> @vor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.or.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vor_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
define <vscale x 2 x i64> @vor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.or.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vor_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
define <vscale x 4 x i64> @vor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.or.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vor_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
define <vscale x 8 x i64> @vor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.or.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vor_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vor_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vor_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vor.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
define <vscale x 1 x i8> @vpgather_nxv1i8(<vscale x 1 x i8*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <vscale x 2 x i8> @vpgather_nxv2i8(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <vscale x 2 x i16> @vpgather_nxv2i8_sextload_nxv2i16(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
;
; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
define <vscale x 2 x i16> @vpgather_nxv2i8_zextload_nxv2i16(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
;
; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e16, mf2, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
define <vscale x 2 x i32> @vpgather_nxv2i8_sextload_nxv2i32(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vsext.vf4 v8, v9
;
; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vsext.vf4 v8, v10
define <vscale x 2 x i32> @vpgather_nxv2i8_zextload_nxv2i32(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vzext.vf4 v8, v9
;
; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vzext.vf4 v8, v10
define <vscale x 2 x i64> @vpgather_nxv2i8_sextload_nxv2i64(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_sextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf8 v8, v10
;
; RV64-LABEL: vpgather_nxv2i8_sextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf8 v8, v10
define <vscale x 2 x i64> @vpgather_nxv2i8_zextload_nxv2i64(<vscale x 2 x i8*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i8_zextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf8 v8, v10
;
; RV64-LABEL: vpgather_nxv2i8_zextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf8 v8, v10
define <vscale x 4 x i8> @vpgather_nxv4i8(<vscale x 4 x i8*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v12
; RV64-NEXT: ret
define <vscale x 8 x i8> @vpgather_nxv8i8(<vscale x 8 x i8*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8i8:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8i8:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i8, i8* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: .LBB12_2:
; RV32-NEXT: vsetvli a4, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v24, v10
-; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e8, m2, ta, ma
; RV32-NEXT: vluxei32.v v18, (a0), v24, v0.t
; RV32-NEXT: bltu a1, a2, .LBB12_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: .LBB12_4:
; RV32-NEXT: vsetvli a2, zero, e32, m8, ta, ma
; RV32-NEXT: vsext.vf4 v24, v8
-; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v16, (a0), v24, v0.t
; RV32-NEXT: vmv4r.v v8, v16
; RV64-NEXT: vslidedown.vx v0, v13, a6
; RV64-NEXT: vsetvli t0, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v11
-; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a7, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v19, (a0), v24, v0.t
; RV64-NEXT: bltu a1, a5, .LBB12_6
; RV64-NEXT: # %bb.5:
; RV64-NEXT: vslidedown.vx v0, v12, a6
; RV64-NEXT: vsetvli a5, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v9
-; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a4, e8, m1, ta, ma
; RV64-NEXT: vluxei64.v v17, (a0), v24, v0.t
; RV64-NEXT: bltu a1, a3, .LBB12_10
; RV64-NEXT: # %bb.9:
; RV64-NEXT: .LBB12_10:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v8
-; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v16, (a0), v24, v0.t
; RV64-NEXT: bltu a2, a3, .LBB12_12
; RV64-NEXT: .LBB12_12:
; RV64-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v24, v10
-; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e8, m1, ta, ma
; RV64-NEXT: vmv1r.v v0, v13
; RV64-NEXT: vluxei64.v v18, (a0), v24, v0.t
; RV64-NEXT: vmv4r.v v8, v16
define <vscale x 1 x i16> @vpgather_nxv1i16(<vscale x 1 x i16*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <vscale x 2 x i16> @vpgather_nxv2i16(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <vscale x 2 x i32> @vpgather_nxv2i16_sextload_nxv2i32(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vsext.vf2 v8, v9
;
; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
define <vscale x 2 x i32> @vpgather_nxv2i16_zextload_nxv2i32(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV32-NEXT: vzext.vf2 v8, v9
;
; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e32, m1, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
define <vscale x 2 x i64> @vpgather_nxv2i16_sextload_nxv2i64(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i16_sextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf4 v8, v10
;
; RV64-LABEL: vpgather_nxv2i16_sextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf4 v8, v10
define <vscale x 2 x i64> @vpgather_nxv2i16_zextload_nxv2i64(<vscale x 2 x i16*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i16_zextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf4 v8, v10
;
; RV64-LABEL: vpgather_nxv2i16_zextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf4 v8, v10
define <vscale x 4 x i16> @vpgather_nxv4i16(<vscale x 4 x i16*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
define <vscale x 8 x i16> @vpgather_nxv8i16(<vscale x 8 x i16*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8i16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8i16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i16, i16* %base, <vscale x 8 x i16> %idxs
define <vscale x 1 x i32> @vpgather_nxv1i32(<vscale x 1 x i32*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <vscale x 2 x i32> @vpgather_nxv2i32(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
define <vscale x 2 x i64> @vpgather_nxv2i32_sextload_nxv2i64(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i32_sextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vsext.vf2 v8, v10
;
; RV64-LABEL: vpgather_nxv2i32_sextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vsext.vf2 v8, v10
define <vscale x 2 x i64> @vpgather_nxv2i32_zextload_nxv2i64(<vscale x 2 x i32*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i32_zextload_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV32-NEXT: vzext.vf2 v8, v10
;
; RV64-LABEL: vpgather_nxv2i32_zextload_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vsetvli a0, zero, e64, m2, ta, ma
; RV64-NEXT: vzext.vf2 v8, v10
define <vscale x 4 x i32> @vpgather_nxv4i32(<vscale x 4 x i32*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
define <vscale x 8 x i32> @vpgather_nxv8i32(<vscale x 8 x i32*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8i32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8i32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i16> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i32, i32* %base, <vscale x 8 x i32> %idxs
define <vscale x 1 x i64> @vpgather_nxv1i64(<vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.gather.nxv1i64.nxv1p0i64(<vscale x 1 x i64*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @vpgather_nxv2i64(<vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.gather.nxv2i64.nxv2p0i64(<vscale x 2 x i64*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @vpgather_nxv4i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.gather.nxv4i64.nxv4p0i64(<vscale x 4 x i64*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vpgather_nxv8i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.gather.nxv8i64.nxv8p0i64(<vscale x 8 x i64*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i16> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i32> %idxs
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsll.vi v16, v16, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds i64, i64* %base, <vscale x 8 x i64> %idxs
define <vscale x 1 x half> @vpgather_nxv1f16(<vscale x 1 x half*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <vscale x 2 x half> @vpgather_nxv2f16(<vscale x 2 x half*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv1r.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v10
; RV64-NEXT: ret
define <vscale x 4 x half> @vpgather_nxv4f16(<vscale x 4 x half*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
define <vscale x 8 x half> @vpgather_nxv8f16(<vscale x 8 x half*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8f16:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8f16:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i16>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vadd.vv v12, v12, v12
-; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v12, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vadd.vv v16, v16, v16
-; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds half, half* %base, <vscale x 8 x i16> %idxs
define <vscale x 1 x float> @vpgather_nxv1f32(<vscale x 1 x float*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; RV64-NEXT: vluxei64.v v9, (zero), v8, v0.t
; RV64-NEXT: vmv1r.v v8, v9
; RV64-NEXT: ret
define <vscale x 2 x float> @vpgather_nxv2f32(<vscale x 2 x float*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; RV64-NEXT: vluxei64.v v10, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v10
; RV64-NEXT: ret
define <vscale x 4 x float> @vpgather_nxv4f32(<vscale x 4 x float*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; RV64-NEXT: vluxei64.v v12, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v12
; RV64-NEXT: ret
define <vscale x 8 x float> @vpgather_nxv8f32(<vscale x 8 x float*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8f32:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (zero), v8, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8f32:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v8, v0.t
; RV64-NEXT: vmv.v.v v8, v16
; RV64-NEXT: ret
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i16> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v8, v12, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i32>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v8, v8, 2
-; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v8, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v16, v16, 2
-; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v16, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds float, float* %base, <vscale x 8 x i32> %idxs
define <vscale x 1 x double> @vpgather_nxv1f64(<vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv1f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV32-NEXT: vluxei32.v v9, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v9
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv1f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 1 x double> @llvm.vp.gather.nxv1f64.nxv1p0f64(<vscale x 1 x double*> %ptrs, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vpgather_nxv2f64(<vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv2f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV32-NEXT: vluxei32.v v10, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v10
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv2f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 2 x double> @llvm.vp.gather.nxv2f64.nxv2p0f64(<vscale x 2 x double*> %ptrs, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vpgather_nxv4f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv4f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV32-NEXT: vluxei32.v v12, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v12
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv4f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 4 x double> @llvm.vp.gather.nxv4f64.nxv4p0f64(<vscale x 4 x double*> %ptrs, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 6 x double> @vpgather_nxv6f64(<vscale x 6 x double*> %ptrs, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv6f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv6f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 6 x double> @llvm.vp.gather.nxv6f64.nxv6p0f64(<vscale x 6 x double*> %ptrs, <vscale x 6 x i1> %m, i32 %evl)
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 6 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i8> %idxs to <vscale x 6 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i8> %idxs to <vscale x 6 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 6 x i16> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i16> %idxs to <vscale x 6 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i16> %idxs to <vscale x 6 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 6 x i32> %idxs
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 6 x i32> %idxs to <vscale x 6 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsll.vi v16, v16, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 6 x i64> %idxs
define <vscale x 8 x double> @vpgather_nxv8f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vpgather_nxv8f64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v16
; RV32-NEXT: ret
;
; RV64-LABEL: vpgather_nxv8f64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
%v = call <vscale x 8 x double> @llvm.vp.gather.nxv8f64.nxv8p0f64(<vscale x 8 x double*> %ptrs, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i8> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf4 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf8 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i8> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i16> %idxs
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vzext.vf2 v12, v8
; RV32-NEXT: vsll.vi v16, v12, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf4 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i16> %idxs to <vscale x 8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i32> %idxs
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = sext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
; RV32: # %bb.0:
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vsll.vi v16, v8, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vzext.vf2 v16, v8
; RV64-NEXT: vsll.vi v8, v16, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%eidxs = zext <vscale x 8 x i32> %idxs to <vscale x 8 x i64>
; RV32-NEXT: vsetvli a2, zero, e32, m4, ta, ma
; RV32-NEXT: vnsrl.wi v16, v8, 0
; RV32-NEXT: vsll.vi v16, v16, 3
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v8, (a0), v16, v0.t
; RV32-NEXT: ret
;
; RV64: # %bb.0:
; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v8, v8, 3
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v8, (a0), v8, v0.t
; RV64-NEXT: ret
%ptrs = getelementptr inbounds double, double* %base, <vscale x 8 x i64> %idxs
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a2, a3
; RV32-NEXT: .LBB102_2:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (zero), v12, v0.t
; RV32-NEXT: bltu a0, a1, .LBB102_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a0, a1
; RV32-NEXT: .LBB102_4:
-; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v24
; RV32-NEXT: vluxei32.v v24, (zero), v8, v0.t
; RV32-NEXT: vmv.v.v v8, v24
; RV64-NEXT: # %bb.1:
; RV64-NEXT: mv a2, a3
; RV64-NEXT: .LBB102_2:
-; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (zero), v16, v0.t
; RV64-NEXT: bltu a0, a1, .LBB102_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a0, a1
; RV64-NEXT: .LBB102_4:
-; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v24
; RV64-NEXT: vluxei64.v v8, (zero), v8, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB103_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB103_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB103_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: .LBB103_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB103_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB103_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB104_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB104_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB104_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: .LBB104_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB104_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB104_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
; RV32-NEXT: # %bb.1:
; RV32-NEXT: mv a3, a4
; RV32-NEXT: .LBB105_2:
-; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV32-NEXT: vluxei32.v v16, (a0), v28, v0.t
; RV32-NEXT: bltu a1, a2, .LBB105_4
; RV32-NEXT: # %bb.3:
; RV32-NEXT: mv a1, a2
; RV32-NEXT: .LBB105_4:
-; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV32-NEXT: vmv1r.v v0, v12
; RV32-NEXT: vluxei32.v v8, (a0), v24, v0.t
; RV32-NEXT: ret
; RV64-NEXT: .LBB105_2:
; RV64-NEXT: vsetvli a4, zero, e64, m8, ta, ma
; RV64-NEXT: vsll.vi v24, v24, 3
-; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; RV64-NEXT: vluxei64.v v16, (a0), v16, v0.t
; RV64-NEXT: bltu a1, a2, .LBB105_4
; RV64-NEXT: # %bb.3:
; RV64-NEXT: mv a1, a2
; RV64-NEXT: .LBB105_4:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vmv1r.v v0, v12
; RV64-NEXT: vluxei64.v v8, (a0), v24, v0.t
; RV64-NEXT: ret
define <vscale x 1 x i8> @vpload_nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i8> @llvm.vp.load.nxv1i8.p0nxv1i8(<vscale x 1 x i8>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vpload_nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i8> @llvm.vp.load.nxv2i8.p0nxv2i8(<vscale x 2 x i8>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 3 x i8> @vpload_nxv3i8(<vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 3 x i8> @llvm.vp.load.nxv3i8.p0nxv3i8(<vscale x 3 x i8>* %ptr, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vpload_nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i8> @llvm.vp.load.nxv4i8.p0nxv4i8(<vscale x 4 x i8>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vpload_nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vle8.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i8> @llvm.vp.load.nxv8i8.p0nxv8i8(<vscale x 8 x i8>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vpload_nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i16> @llvm.vp.load.nxv1i16.p0nxv1i16(<vscale x 1 x i16>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vpload_nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i16> @llvm.vp.load.nxv2i16.p0nxv2i16(<vscale x 2 x i16>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vpload_nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i16> @llvm.vp.load.nxv4i16.p0nxv4i16(<vscale x 4 x i16>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vpload_nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i16> @llvm.vp.load.nxv8i16.p0nxv8i16(<vscale x 8 x i16>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vpload_nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i32> @llvm.vp.load.nxv1i32.p0nxv1i32(<vscale x 1 x i32>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vpload_nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<vscale x 2 x i32>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vpload_nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i32> @llvm.vp.load.nxv4i32.p0nxv4i32(<vscale x 4 x i32>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vpload_nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i32> @llvm.vp.load.nxv8i32.p0nxv8i32(<vscale x 8 x i32>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x i64> @vpload_nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x i64> @llvm.vp.load.nxv1i64.p0nxv1i64(<vscale x 1 x i64>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @vpload_nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x i64> @llvm.vp.load.nxv2i64.p0nxv2i64(<vscale x 2 x i64>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @vpload_nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x i64> @llvm.vp.load.nxv4i64.p0nxv4i64(<vscale x 4 x i64>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vpload_nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x i64> @llvm.vp.load.nxv8i64.p0nxv8i64(<vscale x 8 x i64>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x half> @vpload_nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x half> @llvm.vp.load.nxv1f16.p0nxv1f16(<vscale x 1 x half>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x half> @vpload_nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x half> @llvm.vp.load.nxv2f16.p0nxv2f16(<vscale x 2 x half>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x half> @vpload_nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x half> @llvm.vp.load.nxv4f16.p0nxv4f16(<vscale x 4 x half>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x half> @vpload_nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vle16.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x half> @llvm.vp.load.nxv8f16.p0nxv8f16(<vscale x 8 x half>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x float> @vpload_nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x float> @llvm.vp.load.nxv1f32.p0nxv1f32(<vscale x 1 x float>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x float> @vpload_nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x float> @llvm.vp.load.nxv2f32.p0nxv2f32(<vscale x 2 x float>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x float> @vpload_nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x float> @llvm.vp.load.nxv4f32.p0nxv4f32(<vscale x 4 x float>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x float> @vpload_nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x float> @llvm.vp.load.nxv8f32.p0nxv8f32(<vscale x 8 x float>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 1 x double> @vpload_nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv1f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 1 x double> @llvm.vp.load.nxv1f64.p0nxv1f64(<vscale x 1 x double>* %ptr, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 2 x double> @vpload_nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv2f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 2 x double> @llvm.vp.load.nxv2f64.p0nxv2f64(<vscale x 2 x double>* %ptr, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 4 x double> @vpload_nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv4f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 4 x double> @llvm.vp.load.nxv4f64.p0nxv4f64(<vscale x 4 x double>* %ptr, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 8 x double> @vpload_nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vpload_nxv8f64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
%load = call <vscale x 8 x double> @llvm.vp.load.nxv8f64.p0nxv8f64(<vscale x 8 x double>* %ptr, <vscale x 8 x i1> %m, i32 %evl)
; CHECK-NEXT: .LBB37_2:
; CHECK-NEXT: slli a4, a2, 3
; CHECK-NEXT: add a4, a0, a4
-; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a3, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (a4), v0.t
; CHECK-NEXT: bltu a1, a2, .LBB37_4
; CHECK-NEXT: # %bb.3:
; CHECK-NEXT: mv a1, a2
; CHECK-NEXT: .LBB37_4:
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: ret
; CHECK-NEXT: vslidedown.vx v0, v8, t0
; CHECK-NEXT: slli t0, a3, 3
; CHECK-NEXT: add t0, a0, t0
-; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a6, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v16, (t0), v0.t
; CHECK-NEXT: srli a6, a3, 2
; CHECK-NEXT: sub t0, a2, a5
; CHECK-NEXT: # %bb.7:
; CHECK-NEXT: mv a7, a3
; CHECK-NEXT: .LBB38_8:
-; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a7, e64, m8, ta, ma
; CHECK-NEXT: vle64.v v24, (a2), v0.t
; CHECK-NEXT: bltu a4, a3, .LBB38_10
; CHECK-NEXT: # %bb.9:
; CHECK-NEXT: mv a4, a3
; CHECK-NEXT: .LBB38_10:
-; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a4, e64, m8, ta, ma
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: vle64.v v8, (a0), v0.t
; CHECK-NEXT: vs1r.v v24, (a1)
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vadd.vv v9, v9, v9
; CHECK-NEXT: vsra.vi v9, v9, 1
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vrem_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.srem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vrem_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vrem_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.srem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vrem_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vrem_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.srem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vrem_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.srem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vrem_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vrem_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.srem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vrem_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vrem_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.srem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vrem_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vrem_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.srem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vrem_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vrem_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.srem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vrem_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vrem_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.srem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vrem_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vrem_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.srem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vrem_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vrem_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.srem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vrem_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vrem_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.srem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vrem_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vrem_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.srem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vrem_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vrem_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.srem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vrem_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vrem_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.srem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vrem_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vrem_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.srem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vrem_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vrem_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.srem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vrem_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vrem_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.srem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vrem_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vrem_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.srem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vrem_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vrem.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vrem_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.srem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vrem_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.srem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vrem_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.srem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vrem_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrem_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrem.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.srem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vrem.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrem_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrem.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
; CHECK-NEXT: vand.vx v8, v8, a2
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vremu_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.urem.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vremu_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vremu_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.urem.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vremu_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 3 x i8> @vremu_vv_nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv3i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 3 x i8> @llvm.vp.urem.nxv3i8(<vscale x 3 x i8> %va, <vscale x 3 x i8> %b, <vscale x 3 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vremu_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.urem.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vremu_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vremu_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.urem.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vremu_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vremu_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.urem.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vremu_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vremu_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.urem.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vremu_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vremu_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.urem.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vremu_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vremu_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.urem.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vremu_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vremu_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.urem.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vremu_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vremu_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.urem.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vremu_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vremu_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.urem.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vremu_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vremu_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.urem.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vremu_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vremu_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.urem.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vremu_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vremu_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.urem.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vremu_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vremu_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.urem.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vremu_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vremu_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.urem.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vremu_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vremu_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.urem.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vremu_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vremu_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.urem.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vremu_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vremu.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vremu_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.urem.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vremu_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.urem.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vremu_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.urem.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vremu_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vremu_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vremu.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.urem.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vremu.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vremu_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vremu.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i8> @vrsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vrsub_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 2, i32 0
define <vscale x 2 x i8> @vrsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vrsub_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 2, i32 0
define <vscale x 4 x i8> @vrsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vrsub_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 2, i32 0
define <vscale x 8 x i8> @vrsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vrsub_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 2, i32 0
define <vscale x 16 x i8> @vrsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vrsub_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 2, i32 0
define <vscale x 32 x i8> @vrsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vrsub_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 2, i32 0
define <vscale x 64 x i8> @vrsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vrsub_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 2, i32 0
define <vscale x 1 x i16> @vrsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vrsub_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 2, i32 0
define <vscale x 2 x i16> @vrsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vrsub_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 2, i32 0
define <vscale x 4 x i16> @vrsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vrsub_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 2, i32 0
define <vscale x 8 x i16> @vrsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vrsub_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 2, i32 0
define <vscale x 16 x i16> @vrsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vrsub_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 2, i32 0
define <vscale x 32 x i16> @vrsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vrsub_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 2, i32 0
define <vscale x 1 x i32> @vrsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vrsub_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 2, i32 0
define <vscale x 2 x i32> @vrsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vrsub_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 2, i32 0
define <vscale x 4 x i32> @vrsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vrsub_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 2, i32 0
define <vscale x 8 x i32> @vrsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vrsub_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 2, i32 0
define <vscale x 16 x i32> @vrsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vrsub_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsub.vv v8, v9, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vrsub_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsub.vv v8, v10, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vrsub_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsub.vv v8, v12, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vrsub_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 2, i32 0
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vrsub_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vrsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vrsub_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vrsub_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vrsub.vi v8, v8, 2, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 2, i32 0
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: vsetivli a0, 6, e32, m2, tu, ma
; CHECK-NEXT: vmv.s.x v8, a0
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, tu, mu
+; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
<vscale x 4 x i1> %mask) {
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vand.vx v9, v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vsll_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.shl.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vsll_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vsll_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 3, i32 0
define <vscale x 2 x i8> @vsll_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.shl.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vsll_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vsll_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 3, i32 0
define <vscale x 4 x i8> @vsll_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.shl.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vsll_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vsll_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 3, i32 0
define <vscale x 5 x i8> @vsll_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 5 x i8> @llvm.vp.shl.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vsll_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.shl.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vsll_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vsll_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 3, i32 0
define <vscale x 16 x i8> @vsll_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.shl.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vsll_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vsll_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 3, i32 0
define <vscale x 32 x i8> @vsll_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.shl.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vsll_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vsll_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 3, i32 0
define <vscale x 64 x i8> @vsll_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.shl.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vsll_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vsll_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 3, i32 0
define <vscale x 1 x i16> @vsll_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.shl.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vsll_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vsll_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 3, i32 0
define <vscale x 2 x i16> @vsll_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.shl.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vsll_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vsll_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 3, i32 0
define <vscale x 4 x i16> @vsll_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.shl.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vsll_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vsll_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 3, i32 0
define <vscale x 8 x i16> @vsll_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.shl.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vsll_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vsll_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 3, i32 0
define <vscale x 16 x i16> @vsll_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.shl.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vsll_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vsll_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 3, i32 0
define <vscale x 32 x i16> @vsll_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.shl.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vsll_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vsll_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 3, i32 0
define <vscale x 1 x i32> @vsll_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.shl.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vsll_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vsll_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 3, i32 0
define <vscale x 2 x i32> @vsll_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.shl.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vsll_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vsll_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 3, i32 0
define <vscale x 4 x i32> @vsll_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.shl.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vsll_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vsll_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 3, i32 0
define <vscale x 8 x i32> @vsll_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.shl.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vsll_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vsll_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 3, i32 0
define <vscale x 16 x i32> @vsll_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.shl.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vsll_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsll.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vsll_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 3, i32 0
define <vscale x 1 x i64> @vsll_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.shl.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i64> @vsll_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vsll_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 3, i32 0
define <vscale x 2 x i64> @vsll_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.shl.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @vsll_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vsll_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 3, i32 0
define <vscale x 4 x i64> @vsll_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.shl.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @vsll_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vsll_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 3, i32 0
define <vscale x 8 x i64> @vsll_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.shl.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vsll_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsll_vx_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsll.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsll_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsll.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vsll_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsll_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsll.vi v8, v8, 3, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 3, i32 0
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: li a0, 127
; CHECK-NEXT: vand.vx v9, v9, a0
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vsra_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.ashr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vsra_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vsra_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 5, i32 0
define <vscale x 2 x i8> @vsra_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.ashr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vsra_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vsra_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 5, i32 0
define <vscale x 4 x i8> @vsra_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.ashr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vsra_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vsra_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 5, i32 0
define <vscale x 8 x i8> @vsra_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.ashr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vsra_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vsra_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 5, i32 0
define <vscale x 16 x i8> @vsra_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.ashr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vsra_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vsra_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 5, i32 0
define <vscale x 32 x i8> @vsra_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.ashr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vsra_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vsra_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 5, i32 0
define <vscale x 64 x i8> @vsra_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.ashr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vsra_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vsra_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 5, i32 0
define <vscale x 1 x i16> @vsra_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.ashr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vsra_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vsra_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 5, i32 0
define <vscale x 2 x i16> @vsra_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.ashr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vsra_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vsra_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 5, i32 0
define <vscale x 4 x i16> @vsra_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.ashr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vsra_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vsra_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 5, i32 0
define <vscale x 8 x i16> @vsra_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.ashr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vsra_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vsra_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 5, i32 0
define <vscale x 16 x i16> @vsra_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.ashr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vsra_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vsra_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 5, i32 0
define <vscale x 32 x i16> @vsra_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.ashr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vsra_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vsra_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 5, i32 0
define <vscale x 1 x i32> @vsra_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.ashr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vsra_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vsra_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 5, i32 0
define <vscale x 2 x i32> @vsra_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.ashr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vsra_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vsra_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 5, i32 0
define <vscale x 4 x i32> @vsra_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.ashr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vsra_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vsra_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 5, i32 0
define <vscale x 8 x i32> @vsra_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.ashr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vsra_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vsra_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 5, i32 0
define <vscale x 16 x i32> @vsra_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.ashr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vsra_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsra.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vsra_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 5, i32 0
define <vscale x 1 x i64> @vsra_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.ashr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i64> @vsra_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vsra_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 5, i32 0
define <vscale x 2 x i64> @vsra_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.ashr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @vsra_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vsra_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 5, i32 0
define <vscale x 4 x i64> @vsra_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.ashr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @vsra_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vsra_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 5, i32 0
define <vscale x 6 x i64> @vsra_vv_nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv6i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 6 x i64> @llvm.vp.ashr.nxv6i64(<vscale x 6 x i64> %va, <vscale x 6 x i64> %b, <vscale x 6 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vsra_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsra.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.ashr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vsra_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsra_vx_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsra.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsra_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsra.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vsra_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsra_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsra.vi v8, v8, 5, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 5, i32 0
; CHECK-NEXT: vand.vx v8, v8, a2
; CHECK-NEXT: vmv.v.x v9, a0
; CHECK-NEXT: vand.vx v9, v9, a2
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vsrl_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.lshr.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vsrl_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vsrl_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 4, i32 0
define <vscale x 2 x i8> @vsrl_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.lshr.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vsrl_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vsrl_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 4, i32 0
define <vscale x 4 x i8> @vsrl_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.lshr.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vsrl_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vsrl_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 4, i32 0
define <vscale x 8 x i8> @vsrl_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.lshr.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vsrl_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vsrl_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 4, i32 0
define <vscale x 16 x i8> @vsrl_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.lshr.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vsrl_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vsrl_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 4, i32 0
define <vscale x 32 x i8> @vsrl_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.lshr.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vsrl_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vsrl_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 4, i32 0
define <vscale x 64 x i8> @vsrl_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.lshr.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vsrl_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vsrl_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 4, i32 0
define <vscale x 1 x i16> @vsrl_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.lshr.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vsrl_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vsrl_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 4, i32 0
define <vscale x 2 x i16> @vsrl_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.lshr.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vsrl_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vsrl_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 4, i32 0
define <vscale x 4 x i16> @vsrl_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.lshr.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vsrl_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vsrl_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 4, i32 0
define <vscale x 8 x i16> @vsrl_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.lshr.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vsrl_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vsrl_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 4, i32 0
define <vscale x 16 x i16> @vsrl_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.lshr.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vsrl_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vsrl_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 4, i32 0
define <vscale x 32 x i16> @vsrl_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.lshr.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vsrl_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vsrl_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 4, i32 0
define <vscale x 1 x i32> @vsrl_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.lshr.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vsrl_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vsrl_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 4, i32 0
define <vscale x 2 x i32> @vsrl_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.lshr.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vsrl_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vsrl_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 4, i32 0
define <vscale x 4 x i32> @vsrl_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.lshr.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vsrl_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vsrl_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 4, i32 0
define <vscale x 8 x i32> @vsrl_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.lshr.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vsrl_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vsrl_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 4, i32 0
define <vscale x 16 x i32> @vsrl_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.lshr.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vsrl_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsrl.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vsrl_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 4, i32 0
define <vscale x 1 x i64> @vsrl_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.lshr.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i64> @vsrl_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_nxv1i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vsrl_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 4, i32 0
define <vscale x 2 x i64> @vsrl_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.lshr.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i64> @vsrl_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_nxv2i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vsrl_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 4, i32 0
define <vscale x 4 x i64> @vsrl_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.lshr.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i64> @vsrl_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_nxv4i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vsrl_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 4, i32 0
define <vscale x 5 x i64> @vsrl_vv_nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv5i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 5 x i64> @llvm.vp.lshr.nxv5i64(<vscale x 5 x i64> %va, <vscale x 5 x i64> %b, <vscale x 5 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vsrl_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsrl.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.lshr.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i64> @vsrl_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; RV32-LABEL: vsrl_vx_nxv8i64:
; RV32: # %bb.0:
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV32-NEXT: ret
;
; RV64-LABEL: vsrl_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vsrl_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsrl_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsrl.vi v8, v8, 4, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 4, i32 0
define <vscale x 8 x i7> @vsub_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vsub_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.sub.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vsub_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vsub_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.sub.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vsub_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vsub_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.sub.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vsub_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 5 x i8> @vsub_vv_nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 5 x i8> @llvm.vp.sub.nxv5i8(<vscale x 5 x i8> %va, <vscale x 5 x i8> %b, <vscale x 5 x i1> %m, i32 %evl)
define <vscale x 5 x i8> @vsub_vx_nxv5i8(<vscale x 5 x i8> %va, i8 %b, <vscale x 5 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv5i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 5 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vsub_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.sub.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vsub_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vsub_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.sub.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vsub_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vsub_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.sub.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vsub_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vsub_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.sub.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vsub_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i16> @vsub_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.sub.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vsub_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vsub_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.sub.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vsub_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vsub_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.sub.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vsub_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vsub_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.sub.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vsub_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vsub_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.sub.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vsub_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vsub_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.sub.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vsub_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i32> @vsub_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.sub.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vsub_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vsub_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.sub.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vsub_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vsub_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.sub.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vsub_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vsub_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.sub.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vsub_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vsub_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.sub.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vsub_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vsub.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i64> @vsub_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.sub.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vsub_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.sub.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vsub_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.sub.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vsub_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vsub_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.sub.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vsub_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vsub.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<vscale x 2 x i32> %a, <vscale x 2 x i1> %m, i32 %vl)
define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vand.vi v10, v8, 1, v0.t
-; CHECK-NEXT: vsetvli zero, zero, e64, m2, ta, ma
; CHECK-NEXT: vmsne.vi v8, v10, 0, v0.t
; CHECK-NEXT: vmv1r.v v0, v8
; CHECK-NEXT: ret
define <vscale x 8 x i7> @vxor_vx_nxv8i7(<vscale x 8 x i7> %a, i7 signext %b, <vscale x 8 x i1> %mask, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i7:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i7> poison, i7 %b, i32 0
define <vscale x 1 x i8> @vxor_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i8> @llvm.vp.xor.nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i8> @vxor_vx_nxv1i8(<vscale x 1 x i8> %va, i8 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 %b, i32 0
define <vscale x 1 x i8> @vxor_vi_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 7, i32 0
define <vscale x 1 x i8> @vxor_vi_nxv1i8_1(<vscale x 1 x i8> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i8> poison, i8 -1, i32 0
define <vscale x 2 x i8> @vxor_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i8> @llvm.vp.xor.nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i8> @vxor_vx_nxv2i8(<vscale x 2 x i8> %va, i8 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 %b, i32 0
define <vscale x 2 x i8> @vxor_vi_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 7, i32 0
define <vscale x 2 x i8> @vxor_vi_nxv2i8_1(<vscale x 2 x i8> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i8> poison, i8 -1, i32 0
define <vscale x 4 x i8> @vxor_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i8> @llvm.vp.xor.nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i8> @vxor_vx_nxv4i8(<vscale x 4 x i8> %va, i8 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 %b, i32 0
define <vscale x 4 x i8> @vxor_vi_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 7, i32 0
define <vscale x 4 x i8> @vxor_vi_nxv4i8_1(<vscale x 4 x i8> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i8> poison, i8 -1, i32 0
define <vscale x 8 x i8> @vxor_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i8> @llvm.vp.xor.nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i8> @vxor_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 %b, i32 0
define <vscale x 8 x i8> @vxor_vi_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 7, i32 0
define <vscale x 8 x i8> @vxor_vi_nxv8i8_1(<vscale x 8 x i8> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i8> poison, i8 -1, i32 0
define <vscale x 15 x i8> @vxor_vv_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv15i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 15 x i8> @llvm.vp.xor.nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i8> %b, <vscale x 15 x i1> %m, i32 %evl)
define <vscale x 15 x i8> @vxor_vx_nxv15i8(<vscale x 15 x i8> %va, i8 %b, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv15i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 15 x i8> poison, i8 %b, i32 0
define <vscale x 15 x i8> @vxor_vi_nxv15i8(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 15 x i8> poison, i8 7, i32 0
define <vscale x 15 x i8> @vxor_vi_nxv15i8_1(<vscale x 15 x i8> %va, <vscale x 15 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv15i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 15 x i8> poison, i8 -1, i32 0
define <vscale x 16 x i8> @vxor_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i8> @llvm.vp.xor.nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i8> @vxor_vx_nxv16i8(<vscale x 16 x i8> %va, i8 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 %b, i32 0
define <vscale x 16 x i8> @vxor_vi_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 7, i32 0
define <vscale x 16 x i8> @vxor_vi_nxv16i8_1(<vscale x 16 x i8> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i8> poison, i8 -1, i32 0
define <vscale x 32 x i8> @vxor_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i8> @llvm.vp.xor.nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i8> @vxor_vx_nxv32i8(<vscale x 32 x i8> %va, i8 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 %b, i32 0
define <vscale x 32 x i8> @vxor_vi_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 7, i32 0
define <vscale x 32 x i8> @vxor_vi_nxv32i8_1(<vscale x 32 x i8> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i8> poison, i8 -1, i32 0
define <vscale x 64 x i8> @vxor_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 64 x i8> @llvm.vp.xor.nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %b, <vscale x 64 x i1> %m, i32 %evl)
define <vscale x 64 x i8> @vxor_vx_nxv64i8(<vscale x 64 x i8> %va, i8 %b, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e8, m8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 %b, i32 0
define <vscale x 64 x i8> @vxor_vi_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 7, i32 0
define <vscale x 64 x i8> @vxor_vi_nxv64i8_1(<vscale x 64 x i8> %va, <vscale x 64 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv64i8_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 64 x i8> poison, i8 -1, i32 0
define <vscale x 1 x i16> @vxor_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i16> @llvm.vp.xor.nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i16> @vxor_vx_nxv1i16(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vxor_vx_nxv1i16_commute(<vscale x 1 x i16> %va, i16 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i16_commute:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 %b, i32 0
define <vscale x 1 x i16> @vxor_vi_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 7, i32 0
define <vscale x 1 x i16> @vxor_vi_nxv1i16_1(<vscale x 1 x i16> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i16> poison, i16 -1, i32 0
define <vscale x 2 x i16> @vxor_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i16> @llvm.vp.xor.nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i16> @vxor_vx_nxv2i16(<vscale x 2 x i16> %va, i16 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 %b, i32 0
define <vscale x 2 x i16> @vxor_vi_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 7, i32 0
define <vscale x 2 x i16> @vxor_vi_nxv2i16_1(<vscale x 2 x i16> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i16> poison, i16 -1, i32 0
define <vscale x 4 x i16> @vxor_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i16> @llvm.vp.xor.nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i16> @vxor_vx_nxv4i16(<vscale x 4 x i16> %va, i16 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 %b, i32 0
define <vscale x 4 x i16> @vxor_vi_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 7, i32 0
define <vscale x 4 x i16> @vxor_vi_nxv4i16_1(<vscale x 4 x i16> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i16> poison, i16 -1, i32 0
define <vscale x 8 x i16> @vxor_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i16> @llvm.vp.xor.nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i16> @vxor_vx_nxv8i16(<vscale x 8 x i16> %va, i16 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 %b, i32 0
define <vscale x 8 x i16> @vxor_vi_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 7, i32 0
define <vscale x 8 x i16> @vxor_vi_nxv8i16_1(<vscale x 8 x i16> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i16> poison, i16 -1, i32 0
define <vscale x 16 x i16> @vxor_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i16> @llvm.vp.xor.nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i16> @vxor_vx_nxv16i16(<vscale x 16 x i16> %va, i16 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 %b, i32 0
define <vscale x 16 x i16> @vxor_vi_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 7, i32 0
define <vscale x 16 x i16> @vxor_vi_nxv16i16_1(<vscale x 16 x i16> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i16> poison, i16 -1, i32 0
define <vscale x 32 x i16> @vxor_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 32 x i16> @llvm.vp.xor.nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %b, <vscale x 32 x i1> %m, i32 %evl)
define <vscale x 32 x i16> @vxor_vx_nxv32i16(<vscale x 32 x i16> %va, i16 %b, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e16, m8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 %b, i32 0
define <vscale x 32 x i16> @vxor_vi_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 7, i32 0
define <vscale x 32 x i16> @vxor_vi_nxv32i16_1(<vscale x 32 x i16> %va, <vscale x 32 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv32i16_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 32 x i16> poison, i16 -1, i32 0
define <vscale x 1 x i32> @vxor_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i32> @llvm.vp.xor.nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %b, <vscale x 1 x i1> %m, i32 %evl)
define <vscale x 1 x i32> @vxor_vx_nxv1i32(<vscale x 1 x i32> %va, i32 %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 %b, i32 0
define <vscale x 1 x i32> @vxor_vi_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 7, i32 0
define <vscale x 1 x i32> @vxor_vi_nxv1i32_1(<vscale x 1 x i32> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i32> poison, i32 -1, i32 0
define <vscale x 2 x i32> @vxor_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i32> @llvm.vp.xor.nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %b, <vscale x 2 x i1> %m, i32 %evl)
define <vscale x 2 x i32> @vxor_vx_nxv2i32(<vscale x 2 x i32> %va, i32 %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 %b, i32 0
define <vscale x 2 x i32> @vxor_vi_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 7, i32 0
define <vscale x 2 x i32> @vxor_vi_nxv2i32_1(<vscale x 2 x i32> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i32> poison, i32 -1, i32 0
define <vscale x 4 x i32> @vxor_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i32> @llvm.vp.xor.nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %b, <vscale x 4 x i1> %m, i32 %evl)
define <vscale x 4 x i32> @vxor_vx_nxv4i32(<vscale x 4 x i32> %va, i32 %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 %b, i32 0
define <vscale x 4 x i32> @vxor_vi_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 7, i32 0
define <vscale x 4 x i32> @vxor_vi_nxv4i32_1(<vscale x 4 x i32> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i32> poison, i32 -1, i32 0
define <vscale x 8 x i32> @vxor_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i32> @llvm.vp.xor.nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %b, <vscale x 8 x i1> %m, i32 %evl)
define <vscale x 8 x i32> @vxor_vx_nxv8i32(<vscale x 8 x i32> %va, i32 %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 %b, i32 0
define <vscale x 8 x i32> @vxor_vi_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 7, i32 0
define <vscale x 8 x i32> @vxor_vi_nxv8i32_1(<vscale x 8 x i32> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i32> poison, i32 -1, i32 0
define <vscale x 16 x i32> @vxor_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 16 x i32> @llvm.vp.xor.nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %b, <vscale x 16 x i1> %m, i32 %evl)
define <vscale x 16 x i32> @vxor_vx_nxv16i32(<vscale x 16 x i32> %va, i32 %b, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vx_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a1, e32, m8, ta, ma
; CHECK-NEXT: vxor.vx v8, v8, a0, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 %b, i32 0
define <vscale x 16 x i32> @vxor_vi_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 7, i32 0
define <vscale x 16 x i32> @vxor_vi_nxv16i32_1(<vscale x 16 x i32> %va, <vscale x 16 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv16i32_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 16 x i32> poison, i32 -1, i32 0
define <vscale x 1 x i64> @vxor_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v9, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 1 x i64> @llvm.vp.xor.nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %b, <vscale x 1 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m1, ta, ma
; RV32-NEXT: vlse64.v v9, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v9, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv1i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 %b, i32 0
define <vscale x 1 x i64> @vxor_vi_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 7, i32 0
define <vscale x 1 x i64> @vxor_vi_nxv1i64_1(<vscale x 1 x i64> %va, <vscale x 1 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv1i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 1 x i64> poison, i64 -1, i32 0
define <vscale x 2 x i64> @vxor_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v10, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 2 x i64> @llvm.vp.xor.nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %b, <vscale x 2 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m2, ta, ma
; RV32-NEXT: vlse64.v v10, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m2, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v10, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv2i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m2, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 %b, i32 0
define <vscale x 2 x i64> @vxor_vi_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 7, i32 0
define <vscale x 2 x i64> @vxor_vi_nxv2i64_1(<vscale x 2 x i64> %va, <vscale x 2 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv2i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 2 x i64> poison, i64 -1, i32 0
define <vscale x 4 x i64> @vxor_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v12, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 4 x i64> @llvm.vp.xor.nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %b, <vscale x 4 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m4, ta, ma
; RV32-NEXT: vlse64.v v12, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v12, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv4i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 %b, i32 0
define <vscale x 4 x i64> @vxor_vi_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 7, i32 0
define <vscale x 4 x i64> @vxor_vi_nxv4i64_1(<vscale x 4 x i64> %va, <vscale x 4 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv4i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 4 x i64> poison, i64 -1, i32 0
define <vscale x 8 x i64> @vxor_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vv_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vxor.vv v8, v8, v16, v0.t
; CHECK-NEXT: ret
%v = call <vscale x 8 x i64> @llvm.vp.xor.nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %b, <vscale x 8 x i1> %m, i32 %evl)
; RV32-NEXT: addi a0, sp, 8
; RV32-NEXT: vsetvli a1, zero, e64, m8, ta, ma
; RV32-NEXT: vlse64.v v16, (a0), zero
-; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, mu
+; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma
; RV32-NEXT: vxor.vv v8, v8, v16, v0.t
; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: ret
;
; RV64-LABEL: vxor_vx_nxv8i64:
; RV64: # %bb.0:
-; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, mu
+; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma
; RV64-NEXT: vxor.vx v8, v8, a0, v0.t
; RV64-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 %b, i32 0
define <vscale x 8 x i64> @vxor_vi_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vxor.vi v8, v8, 7, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 7, i32 0
define <vscale x 8 x i64> @vxor_vi_nxv8i64_1(<vscale x 8 x i64> %va, <vscale x 8 x i1> %m, i32 zeroext %evl) {
; CHECK-LABEL: vxor_vi_nxv8i64_1:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, mu
+; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma
; CHECK-NEXT: vnot.v v8, v8, v0.t
; CHECK-NEXT: ret
%elt.head = insertelement <vscale x 8 x i64> poison, i64 -1, i32 0