multiclass VPseudoReductionV_VS {
foreach m = MxList.m in {
- let ForceTailAgnostic = true in
defm _VS : VPseudoTernary<V_M1.vrclass, m.vrclass, V_M1.vrclass, m>;
}
}
(ins m.vrclass:$rs2, ixlenimm:$sew),
[]>, RISCVVPseudo;
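Context for the hunks above and below (a sketch, not part of the patch): vmv.s.x and vfmv.s.f write only element 0 of the destination; whether elements 1 through VLMAX-1 survive is governed by the tail policy in vtype. With ForceTailAgnostic set, the vsetvli inserter always emitted these pseudos under ta, so the tail could be clobbered even when the tied $rs1 operand carried live data. Dropping the flag lets tail undisturbed apply. The two policies, assuming VLMAX > 1 and hypothetical registers:

# tail undisturbed: v8[1..VLMAX-1] keep their previous contents
vsetvli zero, zero, e64,m1,tu,mu
vmv.s.x v8, a0
# tail agnostic: v8[1..VLMAX-1] may be overwritten with any value
vsetvli zero, zero, e64,m1,ta,mu
vmv.s.x v8, a0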
let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VMV_S_X,
- ForceTailAgnostic = true, Constraints = "$rd = $rs1" in
+ Constraints = "$rd = $rs1" in
def PseudoVMV_S_X # "_" # m.MX: Pseudo<(outs m.vrclass:$rd),
(ins m.vrclass:$rs1, GPR:$rs2,
AVL:$vl, ixlenimm:$sew),
[]>, RISCVVPseudo;
let HasVLOp = 1, HasSEWOp = 1, BaseInstr = VFMV_S_F,
- ForceTailAgnostic = true, Constraints = "$rd = $rs1" in
+ Constraints = "$rd = $rs1" in
def "PseudoVFMV_S_" # f.FX # "_" # m.MX :
Pseudo<(outs m.vrclass:$rd),
(ins m.vrclass:$rs1, f.fprclass:$rs2,
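The test deltas that follow all fall out of this one policy change. Where the scalar move sits in the middle of other vector work, the inserter now brackets it with a toggle pair: switch to tu for the move, then restore ta for the surrounding whole-register operations. A representative shape (hypothetical registers, mirroring the checks below):

vsetivli zero, 2, e64,m1,ta,mu     # whole-vector work under tail agnostic
vle64.v v25, (a0)
vsetvli zero, zero, e64,m1,tu,mu   # tail undisturbed for the scalar move
vmv.s.x v25, a1                    # writes v25[0] only; v25[1] must survive
vsetvli zero, zero, e64,m1,ta,mu   # back to tail agnostic
vse64.v v25, (a0)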
; LMULMAX2-RV64-NEXT: or a1, a1, a3
; LMULMAX2-RV64-NEXT: or a1, a1, a4
; LMULMAX2-RV64-NEXT: or a1, a1, a2
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX2-RV64-NEXT: ret
;
; LMULMAX1-RV64-NEXT: or a1, a1, a3
; LMULMAX1-RV64-NEXT: or a1, a1, a4
; LMULMAX1-RV64-NEXT: or a1, a1, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
; LMULMAX1-RV64-NEXT: or a1, a1, a5
; LMULMAX1-RV64-NEXT: or a1, a1, a3
; LMULMAX1-RV64-NEXT: or a1, a1, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v25, 1
; LMULMAX1-RV64-NEXT: or a1, a1, a4
; LMULMAX1-RV64-NEXT: or a1, a1, a3
; LMULMAX1-RV64-NEXT: or a1, a1, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v27, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v26, (a6)
; LMULMAX1-RV64-NEXT: ret
; LMULMAX2-RV64-NEXT: and a1, a1, a4
; LMULMAX2-RV64-NEXT: mul a1, a1, a5
; LMULMAX2-RV64-NEXT: srli a1, a1, 56
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX2-RV64-NEXT: ret
;
; LMULMAX1-RV64-NEXT: and a1, a1, a4
; LMULMAX1-RV64-NEXT: mul a1, a1, a5
; LMULMAX1-RV64-NEXT: srli a1, a1, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
; LMULMAX1-RV64-NEXT: and a1, a1, a4
; LMULMAX1-RV64-NEXT: mul a1, a1, a5
; LMULMAX1-RV64-NEXT: srli a1, a1, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vslidedown.vi v27, v25, 1
; LMULMAX1-RV64-NEXT: and a1, a1, a4
; LMULMAX1-RV64-NEXT: mul a1, a1, a5
; LMULMAX1-RV64-NEXT: srli a1, a1, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v27, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v26, (a6)
; LMULMAX1-RV64-NEXT: ret
; LMULMAX2-RV64-NEXT: and a1, a1, a4
; LMULMAX2-RV64-NEXT: mul a1, a1, a5
; LMULMAX2-RV64-NEXT: srli a1, a1, 56
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX2-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX2-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX2-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX2-RV64-NEXT: ret
;
; LMULMAX1-RV64-NEXT: and a1, a1, a4
; LMULMAX1-RV64-NEXT: mul a1, a1, a5
; LMULMAX1-RV64-NEXT: srli a1, a1, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX1-RV64-NEXT: ret
%a = load <2 x i64>, <2 x i64>* %x
; LMULMAX1-RV64-NEXT: and a2, a2, a5
; LMULMAX1-RV64-NEXT: mul a2, a2, a1
; LMULMAX1-RV64-NEXT: srli a2, a2, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a2
; LMULMAX1-RV64-NEXT: vsetivli zero, 1, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vslidedown.vi v26, v25, 1
; LMULMAX1-RV64-NEXT: and a2, a2, a5
; LMULMAX1-RV64-NEXT: mul a1, a2, a1
; LMULMAX1-RV64-NEXT: srli a1, a1, 56
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v26, a1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
; LMULMAX1-RV64-NEXT: vse64.v v27, (a6)
; LMULMAX1-RV64-NEXT: ret
; CHECK-NEXT: vsetivli zero, 1, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.i v25, 0
; CHECK-NEXT: vmerge.vim v25, v25, 1, v0
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v25, a0
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: ret
; RV32-NEXT: addi a4, a0, 20
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vlse32.v v26, (a4), zero
+; RV32-NEXT: vsetvli zero, zero, e32,m1,tu,mu
; RV32-NEXT: vmv.s.x v26, a3
; RV32-NEXT: vsetvli zero, zero, e64,m2,tu,mu
; RV32-NEXT: vslideup.vi v28, v26, 2
; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: addi a1, zero, -1
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmv.s.x v28, a1
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
; CHECK-NEXT: addi a1, zero, 6
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
; CHECK-NEXT: vmv.s.x v28, a1
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: ret
%a = load <8 x i64>, <8 x i64>* %x
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v28, (a0)
+; CHECK-NEXT: addi a2, zero, 6
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,tu,mu
+; CHECK-NEXT: vmv.s.x v28, a2
+; CHECK-NEXT: vsetvli zero, zero, e64,m4,ta,mu
; CHECK-NEXT: vle64.v v8, (a1)
-; CHECK-NEXT: addi a1, zero, 6
-; CHECK-NEXT: vmv.s.x v28, a1
; CHECK-NEXT: vadd.vv v28, v28, v8
; CHECK-NEXT: vse64.v v28, (a0)
; CHECK-NEXT: ret
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.i v25, -1
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v25, zero
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vse8.v v25, (a0)
; CHECK-NEXT: ret
store <2 x i8> <i8 0, i8 -1>, <2 x i8>* %x
; RV64-NEXT: addi a1, a1, -455
; RV64-NEXT: slli a1, a1, 13
; RV64-NEXT: addi a1, a1, -910
+; RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v25, a1
+; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vse64.v v25, (a0)
; RV64-NEXT: ret
store <2 x i64> <i64 2049638230412172402, i64 -1>, <2 x i64>* %x
; CHECK-NEXT: vsetivli zero, 8, e16,m1,ta,mu
; CHECK-NEXT: vmv.v.i v28, 0
; CHECK-NEXT: lui a1, 1048568
+; CHECK-NEXT: vsetvli zero, zero, e16,m1,tu,mu
; CHECK-NEXT: vmv1r.v v29, v28
; CHECK-NEXT: vmv.s.x v29, a1
; CHECK-NEXT: vsetivli zero, 7, e16,m1,tu,mu
; RV64-NEXT: vle64.v v25, (a0)
; RV64-NEXT: vmv.v.i v26, 2
; RV64-NEXT: addi a1, zero, 1
+; RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v26, a1
; RV64-NEXT: lui a1, 1035469
; RV64-NEXT: addiw a1, a1, -819
; RV64-NEXT: addi a1, a1, -819
; RV64-NEXT: slli a1, a1, 12
; RV64-NEXT: addi a1, a1, -819
+; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vmv.v.x v27, a1
; RV64-NEXT: lui a1, 1026731
; RV64-NEXT: addiw a1, a1, -1365
; RV64-NEXT: addi a1, a1, -1365
; RV64-NEXT: slli a1, a1, 12
; RV64-NEXT: addi a1, a1, -1365
+; RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v27, a1
+; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vmulhu.vv v25, v25, v27
; RV64-NEXT: vsrl.vv v25, v25, v26
; RV64-NEXT: vse64.v v25, (a0)
; RV32-NEXT: vsetivli zero, 4, e32,m1,ta,mu
; RV32-NEXT: vmv.v.x v26, a2
; RV32-NEXT: addi a1, a1, 1366
+; RV32-NEXT: vsetvli zero, zero, e32,m1,tu,mu
; RV32-NEXT: vmv.s.x v26, a1
; RV32-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; RV32-NEXT: vmulh.vv v26, v25, v26
; RV64-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; RV64-NEXT: vle64.v v25, (a0)
; RV64-NEXT: vmv.v.i v26, -1
+; RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v26, zero
+; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vmul.vv v26, v25, v26
; RV64-NEXT: lui a1, 21845
; RV64-NEXT: addiw a1, a1, 1365
; RV64-NEXT: addi a2, a1, 1365
; RV64-NEXT: vmv.v.x v27, a2
; RV64-NEXT: addi a1, a1, 1366
+; RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v27, a1
+; RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; RV64-NEXT: vmulh.vv v25, v25, v27
; RV64-NEXT: vadd.vv v25, v25, v26
; RV64-NEXT: addi a1, zero, 63
; LMULMAX1-RV64-NEXT: vmv.v.i v27, 0
; LMULMAX1-RV64-NEXT: addi a2, zero, -1
; LMULMAX1-RV64-NEXT: slli a2, a2, 63
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a2
; LMULMAX1-RV64-NEXT: lui a2, 1044935
; LMULMAX1-RV64-NEXT: addiw a2, a2, 455
; LMULMAX1-RV64-NEXT: addi a2, a2, 455
; LMULMAX1-RV64-NEXT: slli a2, a2, 13
; LMULMAX1-RV64-NEXT: addi a2, a2, 911
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmv.v.x v28, a2
; LMULMAX1-RV64-NEXT: lui a2, 4681
; LMULMAX1-RV64-NEXT: addiw a2, a2, 585
; LMULMAX1-RV64-NEXT: addi a2, a2, 585
; LMULMAX1-RV64-NEXT: slli a2, a2, 13
; LMULMAX1-RV64-NEXT: addi a2, a2, 1171
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v28, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmulhu.vv v28, v26, v28
; LMULMAX1-RV64-NEXT: vsub.vv v26, v26, v28
; LMULMAX1-RV64-NEXT: vmulhu.vv v26, v26, v27
; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v28
; LMULMAX1-RV64-NEXT: vmv.v.i v27, 3
; LMULMAX1-RV64-NEXT: addi a2, zero, 2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vsrl.vv v26, v26, v27
; LMULMAX1-RV64-NEXT: vmv.v.i v27, 2
; LMULMAX1-RV64-NEXT: addi a2, zero, 1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, a2
; LMULMAX1-RV64-NEXT: lui a2, 1035469
; LMULMAX1-RV64-NEXT: addiw a2, a2, -819
; LMULMAX1-RV64-NEXT: addi a2, a2, -819
; LMULMAX1-RV64-NEXT: slli a2, a2, 12
; LMULMAX1-RV64-NEXT: addi a2, a2, -819
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmv.v.x v28, a2
; LMULMAX1-RV64-NEXT: lui a2, 1026731
; LMULMAX1-RV64-NEXT: addiw a2, a2, -1365
; LMULMAX1-RV64-NEXT: addi a2, a2, -1365
; LMULMAX1-RV64-NEXT: slli a2, a2, 12
; LMULMAX1-RV64-NEXT: addi a2, a2, -1365
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v28, a2
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-RV64-NEXT: vmulhu.vv v25, v25, v28
; LMULMAX1-RV64-NEXT: vsrl.vv v25, v25, v27
; LMULMAX1-RV64-NEXT: vse64.v v25, (a0)
; LMULMAX1-RV64-LABEL: mulhs_v4i64:
; LMULMAX1-RV64: # %bb.0:
; LMULMAX1-RV64-NEXT: vsetivli zero, 2, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vle64.v v25, (a0)
; LMULMAX1-RV64-NEXT: addi a1, a0, 16
-; LMULMAX1-RV64-NEXT: vle64.v v25, (a1)
-; LMULMAX1-RV64-NEXT: vle64.v v26, (a0)
+; LMULMAX1-RV64-NEXT: vle64.v v26, (a1)
; LMULMAX1-RV64-NEXT: vmv.v.i v27, -1
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v27, zero
-; LMULMAX1-RV64-NEXT: vmul.vv v28, v25, v27
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vmul.vv v28, v26, v27
; LMULMAX1-RV64-NEXT: lui a2, 21845
; LMULMAX1-RV64-NEXT: addiw a2, a2, 1365
; LMULMAX1-RV64-NEXT: slli a2, a2, 12
; LMULMAX1-RV64-NEXT: addi a3, a2, 1365
; LMULMAX1-RV64-NEXT: vmv.v.x v29, a3
; LMULMAX1-RV64-NEXT: addi a2, a2, 1366
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-RV64-NEXT: vmv.s.x v29, a2
-; LMULMAX1-RV64-NEXT: vmulh.vv v25, v25, v29
-; LMULMAX1-RV64-NEXT: vadd.vv v25, v25, v28
+; LMULMAX1-RV64-NEXT: vsetvli zero, zero, e64,m1,ta,mu
+; LMULMAX1-RV64-NEXT: vmulh.vv v26, v26, v29
+; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v28
; LMULMAX1-RV64-NEXT: addi a2, zero, 63
-; LMULMAX1-RV64-NEXT: vsrl.vx v28, v25, a2
+; LMULMAX1-RV64-NEXT: vsrl.vx v28, v26, a2
; LMULMAX1-RV64-NEXT: vid.v v30
-; LMULMAX1-RV64-NEXT: vsra.vv v25, v25, v30
-; LMULMAX1-RV64-NEXT: vadd.vv v25, v25, v28
-; LMULMAX1-RV64-NEXT: vmul.vv v27, v26, v27
-; LMULMAX1-RV64-NEXT: vmulh.vv v26, v26, v29
-; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v27
-; LMULMAX1-RV64-NEXT: vsrl.vx v27, v26, a2
; LMULMAX1-RV64-NEXT: vsra.vv v26, v26, v30
-; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v27
-; LMULMAX1-RV64-NEXT: vse64.v v26, (a0)
-; LMULMAX1-RV64-NEXT: vse64.v v25, (a1)
+; LMULMAX1-RV64-NEXT: vadd.vv v26, v26, v28
+; LMULMAX1-RV64-NEXT: vmul.vv v27, v25, v27
+; LMULMAX1-RV64-NEXT: vmulh.vv v25, v25, v29
+; LMULMAX1-RV64-NEXT: vadd.vv v25, v25, v27
+; LMULMAX1-RV64-NEXT: vsrl.vx v27, v25, a2
+; LMULMAX1-RV64-NEXT: vsra.vv v25, v25, v30
+; LMULMAX1-RV64-NEXT: vadd.vv v25, v25, v27
+; LMULMAX1-RV64-NEXT: vse64.v v25, (a0)
+; LMULMAX1-RV64-NEXT: vse64.v v26, (a1)
; LMULMAX1-RV64-NEXT: ret
%a = load <4 x i64>, <4 x i64>* %x
%b = sdiv <4 x i64> %a, <i64 3, i64 -3, i64 3, i64 -3>
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e8,mf8,ta,mu
; CHECK-NEXT: vmv.v.x v25, a1
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v25, a0
+; CHECK-NEXT: vsetvli zero, zero, e8,mf8,ta,mu
; CHECK-NEXT: vand.vi v25, v25, 1
; CHECK-NEXT: vmsne.vi v0, v25, 0
; CHECK-NEXT: ret
; CHECK-NEXT: vfmv.f.s ft1, v25
; CHECK-NEXT: vsetivli zero, 2, e16,mf4,ta,mu
; CHECK-NEXT: vfmv.v.f v8, ft1
+; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, ft0
; CHECK-NEXT: ret
%v = select i1 %c, <2 x half> %a, <2 x half> %b
; CHECK-NEXT: .LBB1_5:
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: .LBB1_6:
+; CHECK-NEXT: vsetvli zero, zero, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v25, ft0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vfmv.f.s ft1, v25
; CHECK-NEXT: vsetivli zero, 2, e32,mf2,ta,mu
; CHECK-NEXT: vfmv.v.f v8, ft1
+; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, ft0
; CHECK-NEXT: ret
%v = select i1 %c, <2 x float> %a, <2 x float> %b
; CHECK-NEXT: .LBB9_5:
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: .LBB9_6:
+; CHECK-NEXT: vsetvli zero, zero, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v25, ft0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: ret
; CHECK-NEXT: vfmv.f.s ft1, v25
; CHECK-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; CHECK-NEXT: vfmv.v.f v8, ft1
+; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, ft0
; CHECK-NEXT: ret
%v = select i1 %c, <2 x double> %a, <2 x double> %b
; CHECK-NEXT: .LBB17_5:
; CHECK-NEXT: vfmv.f.s ft0, v8
; CHECK-NEXT: .LBB17_6:
+; CHECK-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v25, ft0
; CHECK-NEXT: vmv1r.v v8, v25
; CHECK-NEXT: ret
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v9, 3
; LMULMAX1-NEXT: addi a0, zero, 2
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v9, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vid.v v8
; LMULMAX1-NEXT: ret
;
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v9, 3
; LMULMAX1-NEXT: addi a0, zero, 2
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v9, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v10, 5
; LMULMAX1-NEXT: addi a0, zero, 4
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v10, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v11, 7
; LMULMAX1-NEXT: addi a0, zero, 6
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v11, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vid.v v8
; LMULMAX1-NEXT: ret
;
; LMULMAX1-NEXT: vsetivli zero, 2, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v9, 3
; LMULMAX1-NEXT: addi a0, zero, 2
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v9, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v10, 5
; LMULMAX1-NEXT: addi a0, zero, 4
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v10, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v11, 7
; LMULMAX1-NEXT: addi a0, zero, 6
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v11, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v12, 9
; LMULMAX1-NEXT: addi a0, zero, 8
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v12, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v13, 11
; LMULMAX1-NEXT: addi a0, zero, 10
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v13, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v14, 13
; LMULMAX1-NEXT: addi a0, zero, 12
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v14, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vmv.v.i v15, 15
; LMULMAX1-NEXT: addi a0, zero, 14
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,tu,mu
; LMULMAX1-NEXT: vmv.s.x v15, a0
+; LMULMAX1-NEXT: vsetvli zero, zero, e64,m1,ta,mu
; LMULMAX1-NEXT: vid.v v8
; LMULMAX1-NEXT: ret
;
; RV32-NEXT: lbu a1, 0(a1)
; RV32-NEXT: slli a2, a2, 8
; RV32-NEXT: or a1, a2, a1
-; RV32-NEXT: vsetivli zero, 2, e16,mf4,ta,mu
+; RV32-NEXT: vsetivli zero, 2, e16,mf4,tu,mu
; RV32-NEXT: vmv.s.x v9, a1
; RV32-NEXT: .LBB4_2: # %else
; RV32-NEXT: andi a0, a0, 2
; RV64-NEXT: lbu a1, 0(a1)
; RV64-NEXT: slli a2, a2, 8
; RV64-NEXT: or a1, a2, a1
-; RV64-NEXT: vsetivli zero, 2, e16,mf4,ta,mu
+; RV64-NEXT: vsetivli zero, 2, e16,mf4,tu,mu
; RV64-NEXT: vmv.s.x v9, a1
; RV64-NEXT: .LBB4_2: # %else
; RV64-NEXT: andi a0, a0, 2
; RV64-NEXT: lwu a1, 0(a1)
; RV64-NEXT: slli a2, a2, 32
; RV64-NEXT: or a1, a2, a1
-; RV64-NEXT: vsetivli zero, 2, e64,m1,ta,mu
+; RV64-NEXT: vsetivli zero, 2, e64,m1,tu,mu
; RV64-NEXT: vmv.s.x v9, a1
; RV64-NEXT: .LBB5_2: # %else
; RV64-NEXT: andi a0, a0, 2
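In the masked-load blocks above, no toggle pair is needed: the scalar move is the only vector operation between the existing vsetivli and the block's exit, so the check simply flips that vsetivli from ta to tu in place:

# before: vsetivli zero, 2, e16,mf4,ta,mu
vsetivli zero, 2, e16,mf4,tu,mu    # tu so v9[1] survives the element-0 write
vmv.s.x v9, a1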
define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
define <vscale x 1 x half> @insertelt_nxv1f16_0(<vscale x 1 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv1f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x half> %v, half %elt, i32 0
define <vscale x 2 x half> @insertelt_nxv2f16_0(<vscale x 2 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv2f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x half> %v, half %elt, i32 0
define <vscale x 4 x half> @insertelt_nxv4f16_0(<vscale x 4 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv4f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x half> %v, half %elt, i32 0
define <vscale x 8 x half> @insertelt_nxv8f16_0(<vscale x 8 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv8f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x half> %v, half %elt, i32 0
define <vscale x 16 x half> @insertelt_nxv16f16_0(<vscale x 16 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv16f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x half> %v, half %elt, i32 0
define <vscale x 32 x half> @insertelt_nxv32f16_0(<vscale x 32 x half> %v, half %elt) {
; CHECK-LABEL: insertelt_nxv32f16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e16,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x half> %v, half %elt, i32 0
define <vscale x 1 x float> @insertelt_nxv1f32_0(<vscale x 1 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv1f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x float> %v, float %elt, i32 0
define <vscale x 2 x float> @insertelt_nxv2f32_0(<vscale x 2 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv2f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x float> %v, float %elt, i32 0
define <vscale x 4 x float> @insertelt_nxv4f32_0(<vscale x 4 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv4f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x float> %v, float %elt, i32 0
define <vscale x 8 x float> @insertelt_nxv8f32_0(<vscale x 8 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv8f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x float> %v, float %elt, i32 0
define <vscale x 16 x float> @insertelt_nxv16f32_0(<vscale x 16 x float> %v, float %elt) {
; CHECK-LABEL: insertelt_nxv16f32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e32,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x float> %v, float %elt, i32 0
define <vscale x 1 x double> @insertelt_nxv1f64_0(<vscale x 1 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv1f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x double> %v, double %elt, i32 0
define <vscale x 2 x double> @insertelt_nxv2f64_0(<vscale x 2 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv2f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x double> %v, double %elt, i32 0
define <vscale x 4 x double> @insertelt_nxv4f64_0(<vscale x 4 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv4f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x double> %v, double %elt, i32 0
define <vscale x 8 x double> @insertelt_nxv8f64_0(<vscale x 8 x double> %v, double %elt) {
; CHECK-LABEL: insertelt_nxv8f64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli a0, zero, e64,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x double> %v, double %elt, i32 0
define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
; CHECK-LABEL: insertelt_nxv2i64_0_c10:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, 10
-; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 10, i32 0
; CHECK-LABEL: insertelt_nxv2i64_0_cn1:
; CHECK: # %bb.0:
; CHECK-NEXT: addi a0, zero, -1
-; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 -1, i32 0
define <vscale x 1 x i8> @insertelt_nxv1i8_0(<vscale x 1 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i8> %v, i8 %elt, i32 0
define <vscale x 2 x i8> @insertelt_nxv2i8_0(<vscale x 2 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i8> %v, i8 %elt, i32 0
define <vscale x 4 x i8> @insertelt_nxv4i8_0(<vscale x 4 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i8> %v, i8 %elt, i32 0
define <vscale x 8 x i8> @insertelt_nxv8i8_0(<vscale x 8 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i8> %v, i8 %elt, i32 0
define <vscale x 16 x i8> @insertelt_nxv16i8_0(<vscale x 16 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i8> %v, i8 %elt, i32 0
define <vscale x 32 x i8> @insertelt_nxv32i8_0(<vscale x 32 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i8> %v, i8 %elt, i32 0
define <vscale x 64 x i8> @insertelt_nxv64i8_0(<vscale x 64 x i8> %v, i8 signext %elt) {
; CHECK-LABEL: insertelt_nxv64i8_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 64 x i8> %v, i8 %elt, i32 0
define <vscale x 1 x i16> @insertelt_nxv1i16_0(<vscale x 1 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i16> %v, i16 %elt, i32 0
define <vscale x 2 x i16> @insertelt_nxv2i16_0(<vscale x 2 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i16> %v, i16 %elt, i32 0
define <vscale x 4 x i16> @insertelt_nxv4i16_0(<vscale x 4 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i16> %v, i16 %elt, i32 0
define <vscale x 8 x i16> @insertelt_nxv8i16_0(<vscale x 8 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i16> %v, i16 %elt, i32 0
define <vscale x 16 x i16> @insertelt_nxv16i16_0(<vscale x 16 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i16> %v, i16 %elt, i32 0
define <vscale x 32 x i16> @insertelt_nxv32i16_0(<vscale x 32 x i16> %v, i16 signext %elt) {
; CHECK-LABEL: insertelt_nxv32i16_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 32 x i16> %v, i16 %elt, i32 0
define <vscale x 1 x i32> @insertelt_nxv1i32_0(<vscale x 1 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv1i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i32> %v, i32 %elt, i32 0
define <vscale x 2 x i32> @insertelt_nxv2i32_0(<vscale x 2 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv2i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i32> %v, i32 %elt, i32 0
define <vscale x 4 x i32> @insertelt_nxv4i32_0(<vscale x 4 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv4i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i32> %v, i32 %elt, i32 0
define <vscale x 8 x i32> @insertelt_nxv8i32_0(<vscale x 8 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv8i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i32> %v, i32 %elt, i32 0
define <vscale x 16 x i32> @insertelt_nxv16i32_0(<vscale x 16 x i32> %v, i32 signext %elt) {
; CHECK-LABEL: insertelt_nxv16i32_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 16 x i32> %v, i32 %elt, i32 0
define <vscale x 1 x i64> @insertelt_nxv1i64_0(<vscale x 1 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv1i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 1 x i64> %v, i64 %elt, i32 0
define <vscale x 2 x i64> @insertelt_nxv2i64_0(<vscale x 2 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv2i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 2 x i64> %v, i64 %elt, i32 0
define <vscale x 4 x i64> @insertelt_nxv4i64_0(<vscale x 4 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv4i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 4 x i64> %v, i64 %elt, i32 0
define <vscale x 8 x i64> @insertelt_nxv8i64_0(<vscale x 8 x i64> %v, i64 %elt) {
; CHECK-LABEL: insertelt_nxv8i64_0:
; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli a1, zero, e64,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
%r = insertelement <vscale x 8 x i64> %v, i64 %elt, i32 0
define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x half> @intrinsic_vfmv.s.f_f_nxv1f16(<vscale x 1 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x half> @intrinsic_vfmv.s.f_f_nxv2f16(<vscale x 2 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfmv.s.f_f_nxv4f16(<vscale x 4 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x half> @intrinsic_vfmv.s.f_f_nxv8f16(<vscale x 8 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x half> @intrinsic_vfmv.s.f_f_nxv16f16(<vscale x 16 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x half> @intrinsic_vfmv.s.f_f_nxv32f16(<vscale x 32 x half> %0, half %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv32f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x float> @intrinsic_vfmv.s.f_f_nxv1f32(<vscale x 1 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfmv.s.f_f_nxv2f32(<vscale x 2 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x float> @intrinsic_vfmv.s.f_f_nxv4f32(<vscale x 4 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x float> @intrinsic_vfmv.s.f_f_nxv8f32(<vscale x 8 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x float> @intrinsic_vfmv.s.f_f_nxv16f32(<vscale x 16 x float> %0, float %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv16f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfmv.s.f_f_nxv1f64(<vscale x 1 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x double> @intrinsic_vfmv.s.f_f_nxv2f64(<vscale x 2 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv2f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x double> @intrinsic_vfmv.s.f_f_nxv4f64(<vscale x 4 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv4f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x double> @intrinsic_vfmv.s.f_f_nxv8f64(<vscale x 8 x double> %0, double %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vfmv.s.f_f_nxv8f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfmv.s.f v8, fa0
; CHECK-NEXT: ret
entry:
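The same reasoning covers the reduction tests that follow: vfredmax.vs also writes only element 0 of its destination, and its pseudos lost ForceTailAgnostic in the VPseudoReductionV_VS hunk at the top, so each reduction now runs under tu and the destination's remaining elements are preserved. Annotated shape of the checks below (operand roles per the V spec):

vsetvli zero, a0, e16,m1,tu,mu     # tu: v8[1..] keep their old contents
vfredmax.vs v8, v9, v10            # v8[0] = max(v10[0], max over v9[0..vl-1])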
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmin_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
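;
; Note: vfredosum is the ordered floating-point sum reduction, so the
; element additions happen strictly in element order. The elided bodies
; presumably mirror the vfredmax/vfredmin tests above (an assumption):
;
;   %a = call <vscale x 4 x half> @llvm.riscv.vfredosum.nxv4f16.nxv1f16(
;     <vscale x 4 x half> %0, <vscale x 1 x half> %1,
;     <vscale x 4 x half> %2, i32 %3)
;   ret <vscale x 4 x half> %a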
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
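; The tests below repeat the vfredosum cases for RV64: the AVL parameter is
; i64 rather than i32, but it is still passed in a0, so the expected vsetvli
; and vfredosum.vs sequences (including the ta -> tu change) match the RV32
; checks above.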
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredosum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
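; vfredsum.vs is the unordered counterpart of vfredosum.vs: it may
; reassociate the floating-point additions, but it has the same operand
; shape (vd[0] receives the result, vs1[0] supplies the initial value), so
; it needs the same tail-undisturbed policy for the untouched elements of vd.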
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x half> @intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredsum_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vfredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
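vfwredosum.vs is the widening ordered reduction: the vector source has SEW-wide elements, while vs1[0] and the result in vd[0] are 2*SEW wide. The vsetvli in the checks that follow is therefore written for the source element width (e16 for half sources accumulating into f32, e32 for float sources accumulating into f64), again with the tu policy. A minimal sketch, registers chosen arbitrarily for illustration:

;   vsetvli zero, a0, e16,m1,tu,mu   ; SEW of the f16 source, tail undisturbed
;   vfwredosum.vs v8, v9, v10        ; f32 v8[0] = v10[0] + widen(v9[0]) + ...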
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredosum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredosum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
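; ---------------------------------------------------------------------------
; Editor's note (not part of the diff): the ta -> tu flips above and below
; follow from dropping ForceTailAgnostic on the reduction pseudos. A
; reduction writes only element 0 of its destination, so the destination's
; remaining elements must now be left undisturbed ("tu") rather than treated
; as don't-care ("ta"). A minimal sketch of the intended semantics, assuming
; this LLVM era's comma-separated vtype spelling:
;
;   vsetvli zero, a0, e16,mf4,tu,mu   ; SEW=16, LMUL=1/4, tail undisturbed
;   vfwredsum.vs v8, v9, v10          ; v8[0] = v10[0] + sum(widen(v9[0..vl-1]));
;                                     ; v8[1..] keep their previous contents
; ---------------------------------------------------------------------------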
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x half> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv1f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x half> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv2f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x half> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv4f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x half> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv8f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x half> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv16f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x float> @intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32(<vscale x 2 x float> %0, <vscale x 32 x half> %1, <vscale x 2 x float> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv2f32_nxv32f16_nxv2f32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x float> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv1f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x float> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv2f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x float> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv4f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x float> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv8f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x double> @intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64(<vscale x 1 x double> %0, <vscale x 16 x float> %1, <vscale x 1 x double> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vfwredsum_mask_vs_nxv1f64_nxv16f32_nxv1f64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vfwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
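; ---------------------------------------------------------------------------
; Editor's note (not part of the diff): vmv.s.x merges a scalar into element 0
; of the destination vector, so with ForceTailAgnostic removed it must likewise
; run under a tail-undisturbed vtype, which is what the checks below now
; expect. An illustrative sketch:
;
;   vsetvli zero, a1, e8,m1,tu,mu   ; tu: elements 1..VLMAX-1 of v8 survive
;   vmv.s.x v8, a0                  ; only v8[0] is overwritten with a0
; ---------------------------------------------------------------------------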
define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i8> @intrinsic_vmv.s.x_x_nxv1i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i8> @intrinsic_vmv.s.x_x_nxv2i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i8> @intrinsic_vmv.s.x_x_nxv4i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vmv.s.x_x_nxv8i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i8> @intrinsic_vmv.s.x_x_nxv16i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x i8> @intrinsic_vmv.s.x_x_nxv32i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 64 x i8> @intrinsic_vmv.s.x_x_nxv64i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv64i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e8,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i16> @intrinsic_vmv.s.x_x_nxv1i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i16> @intrinsic_vmv.s.x_x_nxv2i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vmv.s.x_x_nxv4i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i16> @intrinsic_vmv.s.x_x_nxv8i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i16> @intrinsic_vmv.s.x_x_nxv16i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 32 x i16> @intrinsic_vmv.s.x_x_nxv32i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv32i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e16,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i32> @intrinsic_vmv.s.x_x_nxv1i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,mf2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vmv.s.x_x_nxv2i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i32> @intrinsic_vmv.s.x_x_nxv4i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i32> @intrinsic_vmv.s.x_x_nxv8i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 16 x i32> @intrinsic_vmv.s.x_x_nxv16i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv16i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e32,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vmv.s.x_x_nxv1i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m1,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i64> @intrinsic_vmv.s.x_x_nxv2i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv2i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m2,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i64> @intrinsic_vmv.s.x_x_nxv4i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv4i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m4,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i64> @intrinsic_vmv.s.x_x_nxv8i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmv.s.x_x_nxv8i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a1, e64,m8,tu,mu
; CHECK-NEXT: vmv.s.x v8, a0
; CHECK-NEXT: ret
entry:
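; ---------------------------------------------------------------------------
; Editor's note (not part of the diff): the integer reductions below follow
; the same pattern as the floating-point ones; for vredand the scalar operand
; and the result both live in element 0 only. Hypothetical IR in the shape of
; the truncated bodies above (the exact intrinsic mangling is assumed, not
; taken from the diff):
;
;   %r = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
;            <vscale x 8 x i8> %0,   ; dest: its tail must be undisturbed
;            <vscale x 1 x i8> %1,   ; vector operand being reduced
;            <vscale x 8 x i8> %2,   ; scalar operand, in element 0
;            i32 %3)                 ; AVL
;   ret <vscale x 8 x i8> %r
; ---------------------------------------------------------------------------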
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
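; Illustrative sketch, not part of the patch itself: every test body elided
; after an "entry:" label above has the same shape. It forwards the function
; arguments to the matching reduction intrinsic and returns the result.
; Assuming the result-type/source-type name mangling these tests use, the
; unmasked nxv1i64/nxv8i64 case directly above would read:
;
;   %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
;     <vscale x 1 x i64> %0,   ; dest operand: element 0 receives the
;                              ; reduction, the remaining elements are tail,
;                              ; which the new tu policy keeps undisturbed
;     <vscale x 8 x i64> %1,   ; vector source being reduced (vs2)
;     <vscale x 1 x i64> %2,   ; start value held in element 0 (vs1)
;     i64 %3)                  ; AVL, passed in a0
;   ret <vscale x 1 x i64> %a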
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmax_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmax_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
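; The masked variants elided above follow the same pattern through the .mask
; flavor of each intrinsic, with the <vscale x N x i1> mask argument inserted
; between the start value and the AVL; it surfaces as the v0.t suffix on the
; vred*.vs lines in the checks.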
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmaxu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmaxu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredmin.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredmin.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredmin.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredmin_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredmin_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredmin.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
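; The vredminu (unsigned minimum reduction) cases below take an i32 vl
; operand, i.e. the RV32 variant of these tests. The expected vsetvli changes
; are identical in shape: only the tail policy flips to tu; the SEW/LMUL
; selection and register assignments are unchanged.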
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
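; RV64 variant of the vredminu tests (i64 vl operand); the expectations
; change in the same way as in the RV32 run above.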
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredminu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredminu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredminu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredminu_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredminu_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredminu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
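; vredor (bitwise-OR reduction), RV32 variant (i32 vl operand). As with the
; min reductions, the reduction result still lands in element 0 only, so each
; expected vsetvli gains the tu policy while the vredor.vs lines themselves
; are untouched.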
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
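
A note on the vtype these hunks now expect: in the vsetvli operand, tu selects tail-undisturbed and ta tail-agnostic handling, and for a .vs reduction every destination element past index 0 is tail, so the setting decides whether the old contents of vd survive the instruction. A minimal sketch of the difference, reusing the register assignments from these tests (illustrative, not taken from the patch):

    vsetvli zero, a0, e32,m1,tu,mu   # SEW=32, LMUL=1, tail-undisturbed, mask-undisturbed
    vredsum.vs v8, v9, v10           # v8[0] = v10[0] + sum(v9[0..vl-1]); v8[1..] preserved
    vsetvli zero, a0, e32,m1,ta,mu   # same, but tail-agnostic
    vredsum.vs v8, v9, v10           # v8[0] as above; v8[1..] may take any value
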
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
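
Each test body here is elided down to its entry: label. For orientation, the unmasked cases all follow one pattern; a representative reconstruction for the function above is sketched here, with the intrinsic's exact type mangling assumed rather than quoted from the test file:

    entry:
      %a = call <vscale x 1 x i64> @llvm.riscv.vredor.nxv1i64.nxv8i64(
        <vscale x 1 x i64> %0,  ; tied destination operand, tail now preserved
        <vscale x 8 x i64> %1,  ; vector source vs2
        <vscale x 1 x i64> %2,  ; scalar source vs1 (only element 0 is used)
        i64 %3)                 ; AVL
      ret <vscale x 1 x i64> %a
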
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
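
The _mask_ variants interleaved above and below add only the mask operand: under v0.t, just the active elements of the vector source take part in the reduction, while the scalar operand and the single-element result are handled exactly as in the unmasked form. A sketch mirroring the masked CHECK lines (illustrative):

    vsetvli zero, a0, e64,m1,tu,mu
    vredsum.vs v8, v9, v10, v0.t   # v8[0] = v10[0] + sum of v9[i] for active i < vl; v8[1..] preserved
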
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredsum_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredsum_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 8 x i8> @intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m1,tu,mu
; CHECK-NEXT: vredxor.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m2,tu,mu
; CHECK-NEXT: vredxor.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m4,tu,mu
; CHECK-NEXT: vredxor.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vredxor_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vredxor_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e64,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e64,m8,tu,mu
; CHECK-NEXT: vredxor.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsum_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsum.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
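
The unsigned variant, vwredsumu, receives the identical ta -> tu flip below. Every body elided after "entry:" in these tests is a single call to the matching reduction intrinsic followed by a ret; a minimal, self-contained sketch of one such function follows (the function name and the exact intrinsic type mangling are assumptions for illustration, not copied from the patch):

declare <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(
  <vscale x 4 x i16>, <vscale x 16 x i8>, <vscale x 4 x i16>, i64)

define <vscale x 4 x i16> @sketch_vwredsum(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
entry:
  ; %0 is the destination operand: the reduction writes its result to
  ; element 0, and the intrinsic expects the remaining (tail) elements to
  ; come through from %0 unchanged. %1 is the source vector, %2 carries the
  ; scalar accumulator in element 0, %3 is the VL. Preserving the tail of %0
  ; is presumably why the vsetvli must now use tu instead of ta.
  %a = call <vscale x 4 x i16> @llvm.riscv.vwredsum.nxv4i16.nxv16i8(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3)
  ret <vscale x 4 x i16> %a
}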
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i8> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv1i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i8> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv2i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv4i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i8> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv8i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i8> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv16i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i8> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv32i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 4 x i16> @intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16(<vscale x 4 x i16> %0, <vscale x 64 x i8> %1, <vscale x 4 x i16> %2, <vscale x 64 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv4i16_nxv64i8_nxv4i16:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e8,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e8,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i16> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv1i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv2i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i16> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv4i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i16> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv8i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i16> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv16i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 2 x i32> @intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32(<vscale x 2 x i32> %0, <vscale x 32 x i16> %1, <vscale x 2 x i32> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv2i32_nxv32i16_nxv2i32:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e16,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e16,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv1i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,mf2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,mf2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i32> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv2i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m1,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m1,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v9, v10, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i32> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv4i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m2,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m2,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v10, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i32> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv8i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m4,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m4,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v12, v9, v0.t
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9
; CHECK-NEXT: ret
entry:
define <vscale x 1 x i64> @intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64(<vscale x 1 x i64> %0, <vscale x 16 x i32> %1, <vscale x 1 x i64> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vwredsumu_mask_vs_nxv1i64_nxv16i32_nxv1i64:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a0, e32,m8,ta,mu
+; CHECK-NEXT: vsetvli zero, a0, e32,m8,tu,mu
; CHECK-NEXT: vwredsumu.vs v8, v16, v9, v0.t
; CHECK-NEXT: ret
entry:
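
The remaining hunks come from fixed-length vector tests, where the fallout of dropping ForceTailAgnostic shows up in ordinary codegen rather than in intrinsic tests: each vmv.s.x now has to execute under a tail-undisturbed vsetvli and switch back to tail agnostic afterwards. A hypothetical IR input that produces the vmv.v.i + vmv.s.x pattern being updated below (the function name is made up for illustration):

define <4 x i16> @splat_then_insert() {
  ; A constant whose lanes differ only in element 0 is typically
  ; materialized as a vmv.v.i splat of 10 followed by a vmv.s.x of 9
  ; into element 0 -- the sequence now bracketed by vsetvli toggles.
  ret <4 x i16> <i16 9, i16 10, i16 10, i16 10>
}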
; RV32MV-NEXT: vsll.vi v26, v25, 1
; RV32MV-NEXT: addi a1, zero, 9
; RV32MV-NEXT: vmv.v.i v27, 10
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV32MV-NEXT: vmv.s.x v27, a1
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV32MV-NEXT: vsll.vv v26, v26, v27
; RV32MV-NEXT: addi a1, zero, 2047
; RV32MV-NEXT: vand.vx v25, v25, a1
; RV32MV-NEXT: vmv.v.i v27, 0
; RV32MV-NEXT: addi a2, zero, 1
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV32MV-NEXT: vmv1r.v v28, v27
; RV32MV-NEXT: vmv.s.x v28, a2
+; RV32MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV32MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV32MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
; RV32MV-NEXT: vle16.v v29, (a2)
; RV64MV-NEXT: vsll.vi v26, v25, 1
; RV64MV-NEXT: addi a1, zero, 9
; RV64MV-NEXT: vmv.v.i v27, 10
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV64MV-NEXT: vmv.s.x v27, a1
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV64MV-NEXT: vsll.vv v26, v26, v27
; RV64MV-NEXT: addi a1, zero, 2047
; RV64MV-NEXT: vand.vx v25, v25, a1
; RV64MV-NEXT: vmv.v.i v27, 0
; RV64MV-NEXT: addi a2, zero, 1
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,tu,mu
; RV64MV-NEXT: vmv1r.v v28, v27
; RV64MV-NEXT: vmv.s.x v28, a2
+; RV64MV-NEXT: vsetvli zero, zero, e16,mf2,ta,mu
; RV64MV-NEXT: lui a2, %hi(.LCPI4_1)
; RV64MV-NEXT: addi a2, a2, %lo(.LCPI4_1)
; RV64MV-NEXT: vle16.v v29, (a2)
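
The reason for the toggling around each vmv.s.x: the instruction writes only element 0 of its destination, while the intrinsic it implements promises that the other elements of the tied source operand pass through unchanged; under a tail-agnostic vtype the hardware is free to clobber those tail elements. A minimal sketch of such an intrinsic call (the intrinsic mangling and the use of i64 as the XLEN type on RV64 are assumptions):

declare <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64>, i64, i64)

define <vscale x 1 x i64> @set_first_element(<vscale x 1 x i64> %v, i64 %x, i64 %vl) nounwind {
entry:
  ; Element 0 of %v is replaced by %x; elements 1..VLMAX-1 of %v must
  ; appear unchanged in the result. Only tu (tail undisturbed) guarantees
  ; that, hence the "vsetvli zero, zero, ..., tu, mu" emitted before each
  ; vmv.s.x in the checks above, followed by a switch back to ta.
  %r = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64(<vscale x 1 x i64> %v, i64 %x, i64 %vl)
  ret <vscale x 1 x i64> %r
}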