Some tests used scalable vector (nxv-mangled) intrinsic and function names even though they operate on fixed vector types.
Some also mangled the result and source types in the wrong order.
Rename these to the fixed-vector (v-mangled) form, with the result type first.
Remove scalable vector tests that had ended up in fixed-vector test files.
Also replace insertelement+shufflevector splat constant expressions for all-ones masks with plain fixed constant vectors.
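For example (taken from the diff below), the intrinsic suffix now uses fixed-vector mangling with the result type first:

    declare <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64>, <2 x i1>, i32)  ; before
    declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32)      ; after

and the all-ones mask constant expression

    shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer)

becomes the plain constant vector <2 x i1> <i1 true, i1 true>.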
; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
-declare <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16>, <2 x i1>, i32)
+declare <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16>, <2 x i1>, i32)
-define <2 x i1> @vtrunc_nxv2i1_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16:
+define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
ret <2 x i1> %v
}
-define <2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked:
+define <2 x i1> @vtrunc_v2i1_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i1> %v
}
-declare <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32>, <2 x i1>, i32)
+declare <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32>, <2 x i1>, i32)
-define <2 x i1> @vtrunc_nxv2i1_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32:
+define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, zero, e32, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
ret <2 x i1> %v
}
-define <2 x i1> @vtrunc_nxv2i1_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked:
+define <2 x i1> @vtrunc_v2i1_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i1> %v
}
-declare <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64>, <2 x i1>, i32)
+declare <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64>, <2 x i1>, i32)
-define <2 x i1> @vtrunc_nxv2i1_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64:
+define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT: vand.vi v8, v8, 1, v0.t
; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
ret <2 x i1> %v
}
-define <2 x i1> @vtrunc_nxv2i1_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked:
+define <2 x i1> @vtrunc_v2i1_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i1_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i1> %v
}
; RUN: llc -mtriple=riscv32 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s
-declare <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16>, <2 x i1>, i32)
+declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32)
-define <2 x i7> @vtrunc_nxv2i7_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16:
+define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i7_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
ret <2 x i7> %v
}
-declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<2 x i15>, <2 x i1>, i32)
+declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32)
-define <2 x i8> @vtrunc_nxv2i8_nxv2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15:
+define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i15:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl)
ret <2 x i8> %v
}
-declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16>, <2 x i1>, i32)
+declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32)
-define <2 x i8> @vtrunc_nxv2i8_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16:
+define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i16:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl)
ret <2 x i8> %v
}
-define <2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked:
+define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i16_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i8> %v
}
-declare <128 x i7> @llvm.vp.trunc.nxv128i7.nxv128i16(<128 x i16>, <128 x i1>, i32)
+declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32)
-define <128 x i7> @vtrunc_nxv128i7_nxv128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv128i7_nxv128i16:
+define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v128i7_v128i16:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %v = call <128 x i7> @llvm.vp.trunc.nxv128i7.nxv128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
+ %v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl)
ret <128 x i7> %v
}
-declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32>, <2 x i1>, i32)
+declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32)
-define <2 x i8> @vtrunc_nxv2i8_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32:
+define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
ret <2 x i8> %v
}
-define <2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked:
+define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i8> %v
}
-declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64>, <2 x i1>, i32)
+declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32)
-define <2 x i8> @vtrunc_nxv2i8_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64:
+define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
ret <2 x i8> %v
}
-define <2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked:
+define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i8_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i8> %v
}
-declare <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32>, <2 x i1>, i32)
+declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32)
-define <2 x i16> @vtrunc_nxv2i16_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32:
+define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i16_v2i32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl)
ret <2 x i16> %v
}
-define <2 x i16> @vtrunc_nxv2i16_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked:
+define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i16_v2i32_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i16> %v
}
-declare <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64>, <2 x i1>, i32)
+declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32)
-define <2 x i16> @vtrunc_nxv2i16_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64:
+define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i16_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
ret <2 x i16> %v
}
-define <2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked:
+define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i16_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i16> %v
}
-declare <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64>, <15 x i1>, i32)
+declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32)
-define <15 x i16> @vtrunc_nxv15i16_nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64:
+define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v15i16_v15i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t
; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t
; CHECK-NEXT: ret
- %v = call <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
+ %v = call <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl)
ret <15 x i16> %v
}
-declare <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64>, <2 x i1>, i32)
+declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32)
-define <2 x i32> @vtrunc_nxv2i32_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
+define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i32_v2i64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> %m, i32 %vl)
+ %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl)
ret <2 x i32> %v
}
-define <2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked:
+define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v2i32_v2i64_unmasked:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
; CHECK-NEXT: vnsrl.wi v8, v8, 0
; CHECK-NEXT: ret
- %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl)
+ %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl)
ret <2 x i32> %v
}
-declare <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64>, <128 x i1>, i32)
+declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32)
-define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv128i32_nxv128i64:
+define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v128i32_v128i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %v = call <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64> %a, <128 x i1> %m, i32 %vl)
+ %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl)
ret <128 x i32> %v
}
-declare <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64>, <32 x i1>, i32)
+declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32)
-define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
-; CHECK-LABEL: vtrunc_nxv32i32_nxv32i64:
+define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) {
+; CHECK-LABEL: vtrunc_v32i32_v32i64:
; CHECK: # %bb.0:
; CHECK-NEXT: addi sp, sp, -16
; CHECK-NEXT: .cfi_def_cfa_offset 16
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %v = call <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64> %a, <32 x i1> %m, i32 %vl)
+ %v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl)
ret <32 x i32> %v
}
store <16 x i32> %y, <16 x i32>* %z
ret void
}
-
-define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(<vscale x 2 x half>* %x) {
-; CHECK-LABEL: extload_nxv2f16_nxv2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vle16.v v9, (a0)
-; CHECK-NEXT: vfwcvt.f.f.v v8, v9
-; CHECK-NEXT: ret
- %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
- %z = fpext <vscale x 2 x half> %y to <vscale x 2 x float>
- ret <vscale x 2 x float> %z
-}
-
-define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(<vscale x 2 x half>* %x) {
-; CHECK-LABEL: extload_nxv2f16_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vle16.v v8, (a0)
-; CHECK-NEXT: vfwcvt.f.f.v v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v10
-; CHECK-NEXT: ret
- %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x
- %z = fpext <vscale x 2 x half> %y to <vscale x 2 x double>
- ret <vscale x 2 x double> %z
-}
-
-define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(<vscale x 4 x half>* %x) {
-; CHECK-LABEL: extload_nxv4f16_nxv4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl1re16.v v10, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v10
-; CHECK-NEXT: ret
- %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
- %z = fpext <vscale x 4 x half> %y to <vscale x 4 x float>
- ret <vscale x 4 x float> %z
-}
-
-define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(<vscale x 4 x half>* %x) {
-; CHECK-LABEL: extload_nxv4f16_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl1re16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v12
-; CHECK-NEXT: ret
- %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x
- %z = fpext <vscale x 4 x half> %y to <vscale x 4 x double>
- ret <vscale x 4 x double> %z
-}
-
-define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(<vscale x 8 x half>* %x) {
-; CHECK-LABEL: extload_nxv8f16_nxv8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl2re16.v v12, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v12
-; CHECK-NEXT: ret
- %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
- %z = fpext <vscale x 8 x half> %y to <vscale x 8 x float>
- ret <vscale x 8 x float> %z
-}
-
-define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(<vscale x 8 x half>* %x) {
-; CHECK-LABEL: extload_nxv8f16_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl2re16.v v8, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v16
-; CHECK-NEXT: ret
- %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x
- %z = fpext <vscale x 8 x half> %y to <vscale x 8 x double>
- ret <vscale x 8 x double> %z
-}
-
-define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(<vscale x 16 x half>* %x) {
-; CHECK-LABEL: extload_nxv16f16_nxv16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl4re16.v v16, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v16
-; CHECK-NEXT: ret
- %y = load <vscale x 16 x half>, <vscale x 16 x half>* %x
- %z = fpext <vscale x 16 x half> %y to <vscale x 16 x float>
- ret <vscale x 16 x float> %z
-}
-
-define <vscale x 16 x double> @extload_nxv16f16_nxv16f64(<vscale x 16 x half>* %x) {
-; CHECK-LABEL: extload_nxv16f16_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl4re16.v v16, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v20, v16
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v20
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v24, v18
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v16, v24
-; CHECK-NEXT: ret
- %y = load <vscale x 16 x half>, <vscale x 16 x half>* %x
- %z = fpext <vscale x 16 x half> %y to <vscale x 16 x double>
- ret <vscale x 16 x double> %z
-}
-
-define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, <vscale x 2 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv2f32_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v9, v8
-; CHECK-NEXT: vse16.v v9, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half>
- store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
- ret void
-}
-
-define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(<vscale x 2 x float>* %x) {
-; CHECK-LABEL: extload_nxv2f32_nxv2f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl1re32.v v10, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v10
-; CHECK-NEXT: ret
- %y = load <vscale x 2 x float>, <vscale x 2 x float>* %x
- %z = fpext <vscale x 2 x float> %y to <vscale x 2 x double>
- ret <vscale x 2 x double> %z
-}
-
-define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, <vscale x 4 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv4f32_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v10, v8
-; CHECK-NEXT: vs1r.v v10, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half>
- store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
- ret void
-}
-
-define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(<vscale x 4 x float>* %x) {
-; CHECK-LABEL: extload_nxv4f32_nxv4f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl2re32.v v12, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v12
-; CHECK-NEXT: ret
- %y = load <vscale x 4 x float>, <vscale x 4 x float>* %x
- %z = fpext <vscale x 4 x float> %y to <vscale x 4 x double>
- ret <vscale x 4 x double> %z
-}
-
-define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, <vscale x 8 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv8f32_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v12, v8
-; CHECK-NEXT: vs2r.v v12, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half>
- store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
- ret void
-}
-
-define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(<vscale x 8 x float>* %x) {
-; CHECK-LABEL: extload_nxv8f32_nxv8f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl4re32.v v16, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v16
-; CHECK-NEXT: ret
- %y = load <vscale x 8 x float>, <vscale x 8 x float>* %x
- %z = fpext <vscale x 8 x float> %y to <vscale x 8 x double>
- ret <vscale x 8 x double> %z
-}
-
-define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, <vscale x 16 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv16f32_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v16, v8
-; CHECK-NEXT: vs4r.v v16, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half>
- store <vscale x 16 x half> %y, <vscale x 16 x half>* %z
- ret void
-}
-
-define <vscale x 16 x double> @extload_nxv16f32_nxv16f64(<vscale x 16 x float>* %x) {
-; CHECK-LABEL: extload_nxv16f32_nxv16f64:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vl8re32.v v24, (a0)
-; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfwcvt.f.f.v v8, v24
-; CHECK-NEXT: vfwcvt.f.f.v v16, v28
-; CHECK-NEXT: ret
- %y = load <vscale x 16 x float>, <vscale x 16 x float>* %x
- %z = fpext <vscale x 16 x float> %y to <vscale x 16 x double>
- ret <vscale x 16 x double> %z
-}
-
-define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, <vscale x 2 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv2f64_nxv2f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v8, v10
-; CHECK-NEXT: vse16.v v8, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half>
- store <vscale x 2 x half> %y, <vscale x 2 x half>* %z
- ret void
-}
-
-define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, <vscale x 2 x float>* %z) {
-; CHECK-LABEL: truncstore_nxv2f64_nxv2f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v10, v8
-; CHECK-NEXT: vs1r.v v10, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float>
- store <vscale x 2 x float> %y, <vscale x 2 x float>* %z
- ret void
-}
-
-define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, <vscale x 4 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv4f64_nxv4f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v8, v12
-; CHECK-NEXT: vs1r.v v8, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half>
- store <vscale x 4 x half> %y, <vscale x 4 x half>* %z
- ret void
-}
-
-define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, <vscale x 4 x float>* %z) {
-; CHECK-LABEL: truncstore_nxv4f64_nxv4f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v12, v8
-; CHECK-NEXT: vs2r.v v12, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float>
- store <vscale x 4 x float> %y, <vscale x 4 x float>* %z
- ret void
-}
-
-define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, <vscale x 8 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv8f64_nxv8f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v8, v16
-; CHECK-NEXT: vs2r.v v8, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half>
- store <vscale x 8 x half> %y, <vscale x 8 x half>* %z
- ret void
-}
-
-define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, <vscale x 8 x float>* %z) {
-; CHECK-LABEL: truncstore_nxv8f64_nxv8f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v16, v8
-; CHECK-NEXT: vs4r.v v16, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float>
- store <vscale x 8 x float> %y, <vscale x 8 x float>* %z
- ret void
-}
-
-define void @truncstore_nxv16f64_nxv16f16(<vscale x 16 x double> %x, <vscale x 16 x half>* %z) {
-; CHECK-LABEL: truncstore_nxv16f64_nxv16f16:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.rod.f.f.w v24, v8
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v8, v24
-; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16
-; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v10, v12
-; CHECK-NEXT: vs4r.v v8, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x half>
- store <vscale x 16 x half> %y, <vscale x 16 x half>* %z
- ret void
-}
-
-define void @truncstore_nxv16f64_nxv16f32(<vscale x 16 x double> %x, <vscale x 16 x float>* %z) {
-; CHECK-LABEL: truncstore_nxv16f64_nxv16f32:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma
-; CHECK-NEXT: vfncvt.f.f.w v24, v8
-; CHECK-NEXT: vfncvt.f.f.w v28, v16
-; CHECK-NEXT: vs8r.v v24, (a0)
-; CHECK-NEXT: ret
- %y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x float>
- store <vscale x 16 x float> %y, <vscale x 16 x float>* %z
- ret void
-}
%v = call <16 x i1> @llvm.vp.xor.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl)
ret <16 x i1> %v
}
-
-declare <vscale x 1 x i1> @llvm.vp.xor.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32)
-
-define <vscale x 1 x i1> @xor_nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv1i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 1 x i1> @llvm.vp.xor.nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 %evl)
- ret <vscale x 1 x i1> %v
-}
-
-declare <vscale x 2 x i1> @llvm.vp.xor.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32)
-
-define <vscale x 2 x i1> @xor_nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv2i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.vp.xor.nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 %evl)
- ret <vscale x 2 x i1> %v
-}
-
-declare <vscale x 4 x i1> @llvm.vp.xor.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32)
-
-define <vscale x 4 x i1> @xor_nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv4i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 4 x i1> @llvm.vp.xor.nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 %evl)
- ret <vscale x 4 x i1> %v
-}
-
-declare <vscale x 8 x i1> @llvm.vp.xor.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32)
-
-define <vscale x 8 x i1> @xor_nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv8i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 8 x i1> @llvm.vp.xor.nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 %evl)
- ret <vscale x 8 x i1> %v
-}
-
-declare <vscale x 16 x i1> @llvm.vp.xor.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32)
-
-define <vscale x 16 x i1> @xor_nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv16i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 16 x i1> @llvm.vp.xor.nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 %evl)
- ret <vscale x 16 x i1> %v
-}
-
-declare <vscale x 32 x i1> @llvm.vp.xor.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32)
-
-define <vscale x 32 x i1> @xor_nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv32i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 32 x i1> @llvm.vp.xor.nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 %evl)
- ret <vscale x 32 x i1> %v
-}
-
-declare <vscale x 64 x i1> @llvm.vp.xor.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, <vscale x 64 x i1>, i32)
-
-define <vscale x 64 x i1> @xor_nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 zeroext %evl) {
-; CHECK-LABEL: xor_nxv64i1:
-; CHECK: # %bb.0:
-; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma
-; CHECK-NEXT: vmxor.mm v0, v0, v8
-; CHECK-NEXT: ret
- %v = call <vscale x 64 x i1> @llvm.vp.xor.nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 %evl)
- ret <vscale x 64 x i1> %v
-}
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=256 | FileCheck %s
-declare <8 x i16> @llvm.vp.merge.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
-declare <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
-declare <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1>, <8 x float>, <8 x float>, i32)
-declare <8 x double> @llvm.vp.merge.nxv2f64(<8 x i1>, <8 x double>, <8 x double>, i32)
+declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
+declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
+declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)
+declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)
; Test binary operator with vp.merge and vp.add.
-declare <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
+declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32)
define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test that the glued node of the merge is not deleted.
-declare <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32)
+declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32)
define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpadd2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test float binary operator with vp.merge and vp.fadd.
-declare <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float>, <8 x float>, <8 x i1>, i32)
+declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32)
define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfadd:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; Test conversion by fptosi.
-declare <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float>, <8 x i1>, i32)
+declare <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptosi:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i16> @llvm.vp.merge.nxv2i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
+ %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i16> @llvm.vp.merge.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
ret <8 x i16> %b
}
; Test conversion by sitofp.
-declare <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64>, <8 x i1>, i32)
+declare <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64>, <8 x i1>, i32)
define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpsitofp:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; Test integer extension by vp.zext.
-declare <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8>, <8 x i1>, i32)
+declare <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8>, <8 x i1>, i32)
define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpzext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test integer truncation by vp.trunc.
-declare <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64>, <8 x i1>, i32)
+declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32)
define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test floating-point extension by vp.fpext.
-declare <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float>, <8 x i1>, i32)
+declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32)
define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfpext:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x double> @llvm.vp.merge.nxv2f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
+ %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
ret <8 x double> %b
}
; Test floating-point truncation by vp.fptrunc.
-declare <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double>, <8 x i1>, i32)
+declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32)
define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpfptrunc:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; Test load operation by vp.load.
-declare <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> *, <8 x i1>, i32)
-define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) {
+declare <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>*, <8 x i1>, i32)
+
+define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, <8 x i32>* %p, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test that the result has chain and glued nodes.
-define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
+define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32>* %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpmerge_vpload2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma
; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
- %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
-declare <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
-declare <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
-declare <8 x float> @llvm.vp.select.nxv2f32(<8 x i1>, <8 x float>, <8 x float>, i32)
-declare <8 x double> @llvm.vp.select.nxv2f64(<8 x i1>, <8 x double>, <8 x double>, i32)
+declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32)
+declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32)
+declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32)
+declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32)
; Test binary operator with vp.select and vp.add.
define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma
; CHECK-NEXT: vadd.vv v8, v9, v10
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
+ %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl)
ret <8 x i16> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vzext.vf4 v8, v9, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x double> @llvm.vp.select.nxv2f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
+ %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl)
ret <8 x double> %b
}
; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
- %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
+ %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl)
ret <8 x float> %b
}
; Test load operation by vp.load.
-define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) {
+define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, <8 x i32>* %p, <8 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
; Test a result that has a chain and a glued node.
-define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
+define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, <8 x i32>* %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) {
; CHECK-LABEL: vpselect_vpload2:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT: vmseq.vv v0, v9, v10
; CHECK-NEXT: vle32.v v8, (a0), v0.t
; CHECK-NEXT: ret
- %splat = insertelement <8 x i1> poison, i1 -1, i32 0
+ %splat = insertelement <8 x i1> poison, i1 true, i32 0
%mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer
- %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl)
- %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
- %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
+ %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl)
+ %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl)
+ %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl)
ret <8 x i32> %b
}
ret i1 %r
}
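; The i1 reductions below exercise min/max as boolean logic: with true
; modeled as signed -1, smax behaves as an AND-style reduction and smin as
; OR; unsigned, umax is OR and umin is AND. The CHECK lines mirror this:
; the AND-style cases (smax, umin) complement the mask with vmnot.m first,
; while the OR-style cases (smin, umax) use v0 directly.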
-declare i1 @llvm.vp.reduce.smax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv1i1:
+define signext i1 @vpreduce_smax_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv2i1:
+define signext i1 @vpreduce_smax_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv4i1:
+define signext i1 @vpreduce_smax_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv8i1:
+define signext i1 @vpreduce_smax_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv16i1:
+define signext i1 @vpreduce_smax_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv32i1:
+define signext i1 @vpreduce_smax_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv64i1:
+define signext i1 @vpreduce_smax_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv1i1:
+define signext i1 @vpreduce_smin_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv2i1:
+define signext i1 @vpreduce_smin_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv4i1:
+define signext i1 @vpreduce_smin_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv8i1:
+define signext i1 @vpreduce_smin_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv16i1:
+define signext i1 @vpreduce_smin_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv32i1:
+define signext i1 @vpreduce_smin_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv64i1:
+define signext i1 @vpreduce_smin_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv1i1:
+define signext i1 @vpreduce_umax_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv2i1:
+define signext i1 @vpreduce_umax_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv4i1:
+define signext i1 @vpreduce_umax_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv8i1:
+define signext i1 @vpreduce_umax_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv16i1:
+define signext i1 @vpreduce_umax_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv32i1:
+define signext i1 @vpreduce_umax_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv64i1:
+define signext i1 @vpreduce_umax_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vmv1r.v v9, v0
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: andi a0, a0, 1
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv1i1:
+define signext i1 @vpreduce_umin_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v1i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv2i1:
+define signext i1 @vpreduce_umin_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v2i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv4i1:
+define signext i1 @vpreduce_umin_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v4i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv8i1:
+define signext i1 @vpreduce_umin_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v8i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv16i1:
+define signext i1 @vpreduce_umin_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v16i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv32i1:
+define signext i1 @vpreduce_umin_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v32i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
ret i1 %r
}
-declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv64i1:
+define signext i1 @vpreduce_umin_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v64i1:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
; CHECK-NEXT: vmnot.m v9, v0
; CHECK-NEXT: and a0, a1, a0
; CHECK-NEXT: neg a0, a0
; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
ret i1 %r
}
declare <1 x float> @llvm.fma.v1f32(<1 x float>, <1 x float>, <1 x float>)
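; The IR bodies are elided in this diff; each vfwmacc-family test follows
; the usual widening idiom (a sketch, assuming the standard operand order
; that maps onto vfwmacc.vv, with %va as the addend):
;   %vd = fpext <1 x half> %vb to <1 x float>
;   %ve = fpext <1 x half> %vc to <1 x float>
;   %vf = call <1 x float> @llvm.fma.v1f32(<1 x float> %vd, <1 x float> %ve, <1 x float> %va)
; The nmacc/msac/nmsac variants additionally negate the product and/or the
; addend with fneg before the @llvm.fma call.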
-define <1 x float> @vfwmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv1f32:
+define <1 x float> @vfwmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v9, v10
ret <1 x float> %vf
}
-define <1 x float> @vfwmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv1f32:
+define <1 x float> @vfwmacc_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
ret <1 x float> %vf
}
-define <1 x float> @vfwnmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv1f32:
+define <1 x float> @vfwnmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
ret <1 x float> %vh
}
-define <1 x float> @vfwnmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv1f32:
+define <1 x float> @vfwnmacc_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <1 x float> %vh
}
-define <1 x float> @vfwnmacc_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv1f32:
+define <1 x float> @vfwnmacc_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <1 x float> %vh
}
-define <1 x float> @vfwmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv1f32:
+define <1 x float> @vfwmsac_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v9, v10
ret <1 x float> %vg
}
-define <1 x float> @vfwmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv1f32:
+define <1 x float> @vfwmsac_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
ret <1 x float> %vg
}
-define <1 x float> @vfwnmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv1f32:
+define <1 x float> @vfwnmsac_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
ret <1 x float> %vg
}
-define <1 x float> @vfwnmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv1f32:
+define <1 x float> @vfwnmsac_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
ret <1 x float> %vg
}
-define <1 x float> @vfwnmsac_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv1f32:
+define <1 x float> @vfwnmsac_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v1f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
-define <2 x float> @vfwmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv2f32:
+define <2 x float> @vfwmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v9, v10
ret <2 x float> %vf
}
-define <2 x float> @vfwmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv2f32:
+define <2 x float> @vfwmacc_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
ret <2 x float> %vf
}
-define <2 x float> @vfwnmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv2f32:
+define <2 x float> @vfwnmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
ret <2 x float> %vh
}
-define <2 x float> @vfwnmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv2f32:
+define <2 x float> @vfwnmacc_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <2 x float> %vh
}
-define <2 x float> @vfwnmacc_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv2f32:
+define <2 x float> @vfwnmacc_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <2 x float> %vh
}
-define <2 x float> @vfwmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv2f32:
+define <2 x float> @vfwmsac_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v9, v10
ret <2 x float> %vg
}
-define <2 x float> @vfwmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv2f32:
+define <2 x float> @vfwmsac_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
ret <2 x float> %vg
}
-define <2 x float> @vfwnmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv2f32:
+define <2 x float> @vfwnmsac_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
ret <2 x float> %vg
}
-define <2 x float> @vfwnmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv2f32:
+define <2 x float> @vfwnmsac_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
ret <2 x float> %vg
}
-define <2 x float> @vfwnmsac_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv2f32:
+define <2 x float> @vfwnmsac_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v2f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-define <4 x float> @vfwmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv4f32:
+define <4 x float> @vfwmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v9, v10
ret <4 x float> %vf
}
-define <4 x float> @vfwmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv4f32:
+define <4 x float> @vfwmacc_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
ret <4 x float> %vf
}
-define <4 x float> @vfwnmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv4f32:
+define <4 x float> @vfwnmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
ret <4 x float> %vh
}
-define <4 x float> @vfwnmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv4f32:
+define <4 x float> @vfwnmacc_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <4 x float> %vh
}
-define <4 x float> @vfwnmacc_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv4f32:
+define <4 x float> @vfwnmacc_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <4 x float> %vh
}
-define <4 x float> @vfwmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv4f32:
+define <4 x float> @vfwmsac_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v9, v10
ret <4 x float> %vg
}
-define <4 x float> @vfwmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv4f32:
+define <4 x float> @vfwmsac_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
ret <4 x float> %vg
}
-define <4 x float> @vfwnmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv4f32:
+define <4 x float> @vfwnmsac_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
ret <4 x float> %vg
}
-define <4 x float> @vfwnmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv4f32:
+define <4 x float> @vfwnmsac_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
ret <4 x float> %vg
}
-define <4 x float> @vfwnmsac_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv4f32:
+define <4 x float> @vfwnmsac_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v4f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
-define <8 x float> @vfwmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv8f32:
+define <8 x float> @vfwmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v10, v11
ret <8 x float> %vf
}
-define <8 x float> @vfwmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv8f32:
+define <8 x float> @vfwmacc_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v10
ret <8 x float> %vf
}
-define <8 x float> @vfwnmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv8f32:
+define <8 x float> @vfwnmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
ret <8 x float> %vh
}
-define <8 x float> @vfwnmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv8f32:
+define <8 x float> @vfwnmacc_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
ret <8 x float> %vh
}
-define <8 x float> @vfwnmacc_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv8f32:
+define <8 x float> @vfwnmacc_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
ret <8 x float> %vh
}
-define <8 x float> @vfwmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv8f32:
+define <8 x float> @vfwmsac_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v10, v11
ret <8 x float> %vg
}
-define <8 x float> @vfwmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv8f32:
+define <8 x float> @vfwmsac_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
ret <8 x float> %vg
}
-define <8 x float> @vfwnmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv8f32:
+define <8 x float> @vfwnmsac_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
ret <8 x float> %vg
}
-define <8 x float> @vfwnmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv8f32:
+define <8 x float> @vfwnmsac_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
ret <8 x float> %vg
}
-define <8 x float> @vfwnmsac_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv8f32:
+define <8 x float> @vfwnmsac_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v8f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)
-define <16 x float> @vfwmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv16f32:
+define <16 x float> @vfwmacc_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v12, v14
ret <16 x float> %vf
}
-define <16 x float> @vfwmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv16f32:
+define <16 x float> @vfwmacc_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v12
ret <16 x float> %vf
}
-define <16 x float> @vfwnmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv16f32:
+define <16 x float> @vfwnmacc_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v12, v14
ret <16 x float> %vh
}
-define <16 x float> @vfwnmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv16f32:
+define <16 x float> @vfwnmacc_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
ret <16 x float> %vh
}
-define <16 x float> @vfwnmacc_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv16f32:
+define <16 x float> @vfwnmacc_fv_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
ret <16 x float> %vh
}
-define <16 x float> @vfwmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv16f32:
+define <16 x float> @vfwmsac_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v12, v14
ret <16 x float> %vg
}
-define <16 x float> @vfwmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv16f32:
+define <16 x float> @vfwmsac_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v12
ret <16 x float> %vg
}
-define <16 x float> @vfwnmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv16f32:
+define <16 x float> @vfwnmsac_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v12, v14
ret <16 x float> %vg
}
-define <16 x float> @vfwnmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv16f32:
+define <16 x float> @vfwnmsac_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
ret <16 x float> %vg
}
-define <16 x float> @vfwnmsac_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv16f32:
+define <16 x float> @vfwnmsac_fv_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v16f32:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
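; The f64 tests repeat the same idiom one element width up: the elided
; bodies fpext <N x float> operands to <N x double> before the @llvm.fma
; call, selecting the widening instructions at e32.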
-define <1 x double> @vfwmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv1f64:
+define <1 x double> @vfwmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v9, v10
ret <1 x double> %vf
}
-define <1 x double> @vfwmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv1f64:
+define <1 x double> @vfwmacc_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
ret <1 x double> %vf
}
-define <1 x double> @vfwnmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv1f64:
+define <1 x double> @vfwnmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
ret <1 x double> %vh
}
-define <1 x double> @vfwnmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv1f64:
+define <1 x double> @vfwnmacc_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <1 x double> %vh
}
-define <1 x double> @vfwnmacc_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv1f64:
+define <1 x double> @vfwnmacc_fv_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <1 x double> %vh
}
-define <1 x double> @vfwmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv1f64:
+define <1 x double> @vfwmsac_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v9, v10
ret <1 x double> %vg
}
-define <1 x double> @vfwmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv1f64:
+define <1 x double> @vfwmsac_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
ret <1 x double> %vg
}
-define <1 x double> @vfwnmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv1f64:
+define <1 x double> @vfwnmsac_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
ret <1 x double> %vg
}
-define <1 x double> @vfwnmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv1f64:
+define <1 x double> @vfwnmsac_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
ret <1 x double> %vg
}
-define <1 x double> @vfwnmsac_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv1f64:
+define <1 x double> @vfwnmsac_fv_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v1f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
-define <2 x double> @vfwmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv2f64:
+define <2 x double> @vfwmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v9, v10
ret <2 x double> %vf
}
-define <2 x double> @vfwmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv2f64:
+define <2 x double> @vfwmacc_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
ret <2 x double> %vf
}
-define <2 x double> @vfwnmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv2f64:
+define <2 x double> @vfwnmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
ret <2 x double> %vh
}
-define <2 x double> @vfwnmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv2f64:
+define <2 x double> @vfwnmacc_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <2 x double> %vh
}
-define <2 x double> @vfwnmacc_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv2f64:
+define <2 x double> @vfwnmacc_fv_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
ret <2 x double> %vh
}
-define <2 x double> @vfwmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv2f64:
+define <2 x double> @vfwmsac_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v9, v10
ret <2 x double> %vg
}
-define <2 x double> @vfwmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv2f64:
+define <2 x double> @vfwmsac_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
ret <2 x double> %vg
}
-define <2 x double> @vfwnmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv2f64:
+define <2 x double> @vfwnmsac_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
ret <2 x double> %vg
}
-define <2 x double> @vfwnmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv2f64:
+define <2 x double> @vfwnmsac_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
ret <2 x double> %vg
}
-define <2 x double> @vfwnmsac_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv2f64:
+define <2 x double> @vfwnmsac_fv_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v2f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
-define <4 x double> @vfwmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv4f64:
+define <4 x double> @vfwmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v10, v11
ret <4 x double> %vf
}
-define <4 x double> @vfwmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv4f64:
+define <4 x double> @vfwmacc_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v10
ret <4 x double> %vf
}
-define <4 x double> @vfwnmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv4f64:
+define <4 x double> @vfwnmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
ret <4 x double> %vh
}
-define <4 x double> @vfwnmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv4f64:
+define <4 x double> @vfwnmacc_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
ret <4 x double> %vh
}
-define <4 x double> @vfwnmacc_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv4f64:
+define <4 x double> @vfwnmacc_fv_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
ret <4 x double> %vh
}
-define <4 x double> @vfwmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv4f64:
+define <4 x double> @vfwmsac_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v10, v11
ret <4 x double> %vg
}
-define <4 x double> @vfwmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv4f64:
+define <4 x double> @vfwmsac_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
ret <4 x double> %vg
}
-define <4 x double> @vfwnmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv4f64:
+define <4 x double> @vfwnmsac_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
ret <4 x double> %vg
}
-define <4 x double> @vfwnmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv4f64:
+define <4 x double> @vfwnmsac_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
ret <4 x double> %vg
}
-define <4 x double> @vfwnmsac_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv4f64:
+define <4 x double> @vfwnmsac_fv_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v4f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
-define <8 x double> @vfwmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv8f64:
+define <8 x double> @vfwmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwmacc.vv v8, v12, v14
ret <8 x double> %vf
}
-define <8 x double> @vfwmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv8f64:
+define <8 x double> @vfwmacc_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwmacc.vf v8, fa0, v12
ret <8 x double> %vf
}
-define <8 x double> @vfwnmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv8f64:
+define <8 x double> @vfwnmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vv v8, v12, v14
ret <8 x double> %vh
}
-define <8 x double> @vfwnmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv8f64:
+define <8 x double> @vfwnmacc_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
ret <8 x double> %vh
}
-define <8 x double> @vfwnmacc_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv8f64:
+define <8 x double> @vfwnmacc_fv_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
ret <8 x double> %vh
}
-define <8 x double> @vfwmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv8f64:
+define <8 x double> @vfwmsac_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwmsac.vv v8, v12, v14
ret <8 x double> %vg
}
-define <8 x double> @vfwmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv8f64:
+define <8 x double> @vfwmsac_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwmsac.vf v8, fa0, v12
ret <8 x double> %vg
}
-define <8 x double> @vfwnmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv8f64:
+define <8 x double> @vfwnmsac_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vv v8, v12, v14
ret <8 x double> %vg
}
-define <8 x double> @vfwnmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv8f64:
+define <8 x double> @vfwnmsac_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
ret <8 x double> %vg
}
-define <8 x double> @vfwnmsac_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv8f64:
+define <8 x double> @vfwnmsac_fv_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v8f64:
; CHECK: # %bb.0:
; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, ma
; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
-declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16:
; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma
; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
+ %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i1> %v
}
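; Truncation to i1 keeps only bit 0 and has no dedicated RVV instruction;
; it is lowered as (x & 1) != 0, i.e. vand.vi with immediate 1 followed
; by vmsne.vi against 0, producing the result in mask register v0.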
; CHECK-NEXT: vand.vi v8, v8, 1
; CHECK-NEXT: vmsne.vi v0, v8, 0
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
+ %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i1> %v
}
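; Scalable vectors have no literal constant syntax, so the all-true mask
; for the unmasked call above must stay a constant expression: insert
; i1 true into element 0, then splat it with a shufflevector whose mask
; is zeroinitializer. That is why the insert+shuffle form is kept in this
; file rather than replaced with a plain constant vector.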
ret <vscale x 15 x i16> %v
}
-declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
; CHECK-NEXT: vnsrl.wi v10, v8, 0, v0.t
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
+ %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
ret <vscale x 2 x i32> %v
}
; CHECK-NEXT: vnsrl.wi v10, v8, 0
; CHECK-NEXT: vmv.v.v v8, v10
; CHECK-NEXT: ret
- %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
+ %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
ret <vscale x 2 x i32> %v
}
ret <vscale x 32 x i8> %v
}
-declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)
define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32:
; CHECK-NEXT: add sp, sp, a0
; CHECK-NEXT: addi sp, sp, 16
; CHECK-NEXT: ret
- %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
+ %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
ret <vscale x 32 x i32> %v
}
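; nxv32i64 is wider than the largest legal RVV register group (LMUL=8),
; so this trunc is legalized by repeatedly splitting into legal halves.
; The sp adjustments in the epilogue above indicate the expansion also
; needed a scratch stack frame, plausibly to spill across the split
; (the matching prologue is elided from this hunk).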