From 3d6c63d41348cd57a2e02b35e5624f6d42a577f6 Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Thu, 6 Oct 2022 10:20:37 -0700 Subject: [PATCH] [RISCV] Cleanup some vector tests. NFC Some tests had scalable vector intrinsic names with fixed vector types. Some had types in the wrong order. Remove scalable vector tests from fixed vector files. Also replace insert+shuffle constexprs with fixed constant vectors. --- .../RISCV/rvv/fixed-vector-trunc-vp-mask.ll | 42 +-- .../CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll | 132 ++++---- .../RISCV/rvv/fixed-vectors-extload-truncstore.ll | 314 ------------------ .../CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll | 84 ----- .../rvv/fixed-vectors-peephole-vmerge-vops.ll | 197 +++++------ .../RISCV/rvv/fixed-vectors-reduction-mask-vp.ll | 224 ++++++------- .../CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll | 360 ++++++++++----------- llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll | 6 +- llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll | 10 +- 9 files changed, 486 insertions(+), 883 deletions(-) diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll index 78bfd4a..816f347 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp-mask.ll @@ -2,77 +2,77 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -declare <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16>, <2 x i1>, i32) +declare <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16>, <2 x i1>, i32) -define <2 x i1> @vtrunc_nxv2i1_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16: +define <2 x i1> @vtrunc_v2i1_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) ret <2 x i1> %v } -define <2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16_unmasked: +define <2 x i1> @vtrunc_v2i1_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32>, <2 x i1>, i32) +declare <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32>, <2 x i1>, i32) -define <2 x i1> @vtrunc_nxv2i1_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32: +define <2 x i1> @vtrunc_v2i1_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t ; CHECK-NEXT: vsetvli
zero, zero, e32, mf2, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) ret <2 x i1> %v } -define <2 x i1> @vtrunc_nxv2i1_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i32_unmasked: +define <2 x i1> @vtrunc_v2i1_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i1> %v } -declare <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64>, <2 x i1>, i32) +declare <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64>, <2 x i1>, i32) -define <2 x i1> @vtrunc_nxv2i1_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64: +define <2 x i1> @vtrunc_v2i1_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, mu ; CHECK-NEXT: vand.vi v8, v8, 1, v0.t ; CHECK-NEXT: vsetvli zero, zero, e64, m1, ta, ma ; CHECK-NEXT: vmsne.vi v0, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i1> %v } -define <2 x i1> @vtrunc_nxv2i1_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i1_nxv2i64_unmasked: +define <2 x i1> @vtrunc_v2i1_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i1_v2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma ; CHECK-NEXT: vand.vi v8, v8, 1 ; CHECK-NEXT: vmsne.vi v0, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i1> @llvm.vp.trunc.v2i1.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i1> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll index 2c9a453..f9a15ac 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vector-trunc-vp.ll @@ -2,56 +2,56 @@ ; RUN: llc -mtriple=riscv32 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s ; RUN: llc -mtriple=riscv64 -mattr=+v,+m -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s | FileCheck %s -declare <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16>, <2 x i1>, i32) +declare <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16>, <2 x i1>, i32) -define <2 x i7> @vtrunc_nxv2i7_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i7_nxv2i16: +define <2 x i7> @vtrunc_v2i7_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i7_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i7> @llvm.vp.trunc.nxv2i7.nxv2i16(<2 x i16> %a, <2 x i1>
%m, i32 %vl) + %v = call <2 x i7> @llvm.vp.trunc.v2i7.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) ret <2 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<2 x i15>, <2 x i1>, i32) +declare <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15>, <2 x i1>, i32) -define <2 x i8> @vtrunc_nxv2i8_nxv2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i15: +define <2 x i8> @vtrunc_v2i8_v2i15(<2 x i15> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_v2i8_v2i15: wait +; CHECK-LABEL: vtrunc_v2i8_v2i15: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i15(<2 x i15> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16>, <2 x i1>, i32) +declare <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16>, <2 x i1>, i32) -define <2 x i8> @vtrunc_nxv2i8_nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16: +define <2 x i8> @vtrunc_v2i8_v2i16(<2 x i16> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i16: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v } -define <2 x i8> @vtrunc_nxv2i8_nxv2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i16_unmasked: +define <2 x i8> @vtrunc_v2i8_v2i16_unmasked(<2 x i16> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i16_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i16(<2 x i16> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i16(<2 x i16> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i8> %v } -declare <128 x i7> @llvm.vp.trunc.nxv128i7.nxv128i16(<128 x i16>, <128 x i1>, i32) +declare <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16>, <128 x i1>, i32) -define <128 x i7> @vtrunc_nxv128i7_nxv128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv128i7_nxv128i16: +define <128 x i7> @vtrunc_v128i7_v128i16(<128 x i16> %a, <128 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v128i7_v128i16: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 @@ -90,40 +90,40 @@ define <128 x i7> @vtrunc_nxv128i7_nxv128i16(<128 x i16> %a, <128 x i1> %m, i32 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %v = call <128 x i7> @llvm.vp.trunc.nxv128i7.nxv128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl) + %v = call <128 x i7> @llvm.vp.trunc.v128i7.v128i16(<128 x i16> %a, <128 x i1> %m, i32 %vl) ret <128 x i7> %v } -declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32>, <2 x i1>, i32) +declare <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32>, <2 x i1>, i32) -define <2 x i8> @vtrunc_nxv2i8_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32: +define <2 x i8> @vtrunc_v2i8_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT:
vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v } -define <2 x i8> @vtrunc_nxv2i8_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i32_unmasked: +define <2 x i8> @vtrunc_v2i8_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i8> %v } -declare <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64>, <2 x i1>, i32) +declare <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64>, <2 x i1>, i32) -define <2 x i8> @vtrunc_nxv2i8_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64: +define <2 x i8> @vtrunc_v2i8_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t @@ -132,12 +132,12 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %v ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i8> %v } -define <2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i8_nxv2i64_unmasked: +define <2 x i8> @vtrunc_v2i8_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i8_v2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 @@ -146,98 +146,98 @@ define <2 x i8> @vtrunc_nxv2i8_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { ; CHECK-NEXT: vsetvli zero, zero, e8, mf8, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i8> @llvm.vp.trunc.nxv2i8.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i8> @llvm.vp.trunc.v2i8.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i8> %v } -declare <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32>, <2 x i1>, i32) +declare <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32>, <2 x i1>, i32) -define <2 x i16> @vtrunc_nxv2i16_nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32: +define <2 x i16> @vtrunc_v2i16_v2i32(<2 x i32> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i16_v2i32: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> %m, i32 %vl) ret <2 x i16> %v } -define <2 x i16>
@vtrunc_nxv2i16_nxv2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i16_nxv2i32_unmasked: +define <2 x i16> @vtrunc_v2i16_v2i32_unmasked(<2 x i32> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i16_v2i32_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i32(<2 x i32> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i32(<2 x i32> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i16> %v } -declare <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64>, <2 x i1>, i32) +declare <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64>, <2 x i1>, i32) -define <2 x i16> @vtrunc_nxv2i16_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64: +define <2 x i16> @vtrunc_v2i16_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i16_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i16> %v } -define <2 x i16> @vtrunc_nxv2i16_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i16_nxv2i64_unmasked: +define <2 x i16> @vtrunc_v2i16_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i16_v2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: vsetvli zero, zero, e16, mf4, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i16> @llvm.vp.trunc.nxv2i16.nxv2i64(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i16> @llvm.vp.trunc.v2i16.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i16> %v } -declare <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64>, <15 x i1>, i32) +declare <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64>, <15 x i1>, i32) -define <15 x i16> @vtrunc_nxv15i16_nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv15i16_nxv15i64: +define <15 x i16> @vtrunc_v15i16_v15i64(<15 x i64> %a, <15 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v15i16_v15i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, mu ; CHECK-NEXT: vnsrl.wi v16, v8, 0, v0.t ; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v16, 0, v0.t ; CHECK-NEXT: ret - %v = call <15 x i16> @llvm.vp.trunc.nxv15i16.nxv15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl) + %v = call <15 x i16> @llvm.vp.trunc.v15i16.v15i64(<15 x i64> %a, <15 x i1> %m, i32 %vl) ret <15 x i16> %v } -declare <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64>, <2 x i1>, i32) +declare <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64>, <2 x i1>, i32) -define <2 x i32> @vtrunc_nxv2i32_nxv2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64: +define <2 x i32> @vtrunc_v2i32_v2i64(<2 x i64> %a, <2 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i32_v2i64: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, mu ; CHECK-NEXT: vnsrl.wi v8,
v8, 0, v0.t ; CHECK-NEXT: ret - %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> %m, i32 %vl) + %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> %m, i32 %vl) ret <2 x i32> %v } -define <2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64_unmasked: +define <2 x i32> @vtrunc_v2i32_v2i64_unmasked(<2 x i64> %a, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v2i32_v2i64_unmasked: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma ; CHECK-NEXT: vnsrl.wi v8, v8, 0 ; CHECK-NEXT: ret - %v = call <2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<2 x i64> %a, <2 x i1> shufflevector (<2 x i1> insertelement (<2 x i1> undef, i1 true, i32 0), <2 x i1> undef, <2 x i32> zeroinitializer), i32 %vl) + %v = call <2 x i32> @llvm.vp.trunc.v2i32.v2i64(<2 x i64> %a, <2 x i1> <i1 true, i1 true>, i32 %vl) ret <2 x i32> %v } -declare <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64>, <128 x i1>, i32) +declare <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64>, <128 x i1>, i32) -define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv128i32_nxv128i64: +define <128 x i32> @vtrunc_v128i32_v128i64(<128 x i64> %a, <128 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v128i32_v128i64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 @@ -490,14 +490,14 @@ define <128 x i32> @vtrunc_nxv128i32_nxv128i64(<128 x i64> %a, <128 x i1> %m, i3 ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %v = call <128 x i32> @llvm.vp.trunc.nxv128i64.nxv128i32(<128 x i64> %a, <128 x i1> %m, i32 %vl) + %v = call <128 x i32> @llvm.vp.trunc.v128i32.v128i64(<128 x i64> %a, <128 x i1> %m, i32 %vl) ret <128 x i32> %v } -declare <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64>, <32 x i1>, i32) +declare <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64>, <32 x i1>, i32) -define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) { -; CHECK-LABEL: vtrunc_nxv32i32_nxv32i64: +define <32 x i32> @vtrunc_v32i32_v32i64(<32 x i64> %a, <32 x i1> %m, i32 zeroext %vl) { +; CHECK-LABEL: vtrunc_v32i32_v32i64: ; CHECK: # %bb.0: ; CHECK-NEXT: addi sp, sp, -16 ; CHECK-NEXT: .cfi_def_cfa_offset 16 @@ -536,6 +536,6 @@ define <32 x i32> @vtrunc_nxv32i32_nxv32i64(<32 x i64> %a, <32 x i1> %m, i32 zer ; CHECK-NEXT: add sp, sp, a0 ; CHECK-NEXT: addi sp, sp, 16 ; CHECK-NEXT: ret - %v = call <32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<32 x i64> %a, <32 x i1> %m, i32 %vl) + %v = call <32 x i32> @llvm.vp.trunc.v32i32.v32i64(<32 x i64> %a, <32 x i1> %m, i32 %vl) ret <32 x i32> %v } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll index dcbc3e0..fff3695 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-extload-truncstore.ll @@ -1845,317 +1845,3 @@ define void @truncstore_v16i64_v16i32(<16 x i64> %x, <16 x i32>* %z) { store <16 x i32> %y, <16 x i32>* %z ret void } - -define <vscale x 2 x float> @extload_nxv2f16_nxv2f32(<vscale x 2 x half>* %x) { -; CHECK-LABEL: extload_nxv2f16_nxv2f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vle16.v v9, (a0) -; CHECK-NEXT: vfwcvt.f.f.v v8, v9 -; CHECK-NEXT: ret - %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x - %z = fpext <vscale x 2 x half> %y to <vscale x 2 x float> - ret <vscale x 2 x float> %z -} - -define <vscale x 2 x double> @extload_nxv2f16_nxv2f64(<vscale x 2 x half>* %x) { -; CHECK-LABEL: extload_nxv2f16_nxv2f64: -; CHECK: #
%bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vle16.v v8, (a0) -; CHECK-NEXT: vfwcvt.f.f.v v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m1, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v10 -; CHECK-NEXT: ret - %y = load <vscale x 2 x half>, <vscale x 2 x half>* %x - %z = fpext <vscale x 2 x half> %y to <vscale x 2 x double> - ret <vscale x 2 x double> %z -} - -define <vscale x 4 x float> @extload_nxv4f16_nxv4f32(<vscale x 4 x half>* %x) { -; CHECK-LABEL: extload_nxv4f16_nxv4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vl1re16.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v10 -; CHECK-NEXT: ret - %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x - %z = fpext <vscale x 4 x half> %y to <vscale x 4 x float> - ret <vscale x 4 x float> %z -} - -define <vscale x 4 x double> @extload_nxv4f16_nxv4f64(<vscale x 4 x half>* %x) { -; CHECK-LABEL: extload_nxv4f16_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl1re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m1, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v12 -; CHECK-NEXT: ret - %y = load <vscale x 4 x half>, <vscale x 4 x half>* %x - %z = fpext <vscale x 4 x half> %y to <vscale x 4 x double> - ret <vscale x 4 x double> %z -} - -define <vscale x 8 x float> @extload_nxv8f16_nxv8f32(<vscale x 8 x half>* %x) { -; CHECK-LABEL: extload_nxv8f16_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vl2re16.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v12 -; CHECK-NEXT: ret - %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x - %z = fpext <vscale x 8 x half> %y to <vscale x 8 x float> - ret <vscale x 8 x float> %z -} - -define <vscale x 8 x double> @extload_nxv8f16_nxv8f64(<vscale x 8 x half>* %x) { -; CHECK-LABEL: extload_nxv8f16_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl2re16.v v8, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v16 -; CHECK-NEXT: ret - %y = load <vscale x 8 x half>, <vscale x 8 x half>* %x - %z = fpext <vscale x 8 x half> %y to <vscale x 8 x double> - ret <vscale x 8 x double> %z -} - -define <vscale x 16 x float> @extload_nxv16f16_nxv16f32(<vscale x 16 x half>* %x) { -; CHECK-LABEL: extload_nxv16f16_nxv16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v16 -; CHECK-NEXT: ret - %y = load <vscale x 16 x half>, <vscale x 16 x half>* %x - %z = fpext <vscale x 16 x half> %y to <vscale x 16 x float> - ret <vscale x 16 x float> %z -} - -define <vscale x 16 x double> @extload_nxv16f16_nxv16f64(<vscale x 16 x half>* %x) { -; CHECK-LABEL: extload_nxv16f16_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl4re16.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v20, v16 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v20 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v24, v18 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v16, v24 -; CHECK-NEXT: ret - %y = load <vscale x 16 x half>, <vscale x 16 x half>* %x - %z = fpext <vscale x 16 x half> %y to <vscale x 16 x double> - ret <vscale x 16 x double> %z -} - -define void @truncstore_nxv2f32_nxv2f16(<vscale x 2 x float> %x, <vscale x 2 x half>* %z) { -; CHECK-LABEL: truncstore_nxv2f32_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v9, v8 -; CHECK-NEXT: vse16.v v9, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 2 x float> %x to <vscale x 2 x half> - store <vscale x 2 x half> %y, <vscale x 2 x half>* %z - ret void -} - -define <vscale x 2 x double> @extload_nxv2f32_nxv2f64(<vscale x 2 x float>* %x) { -; CHECK-LABEL: extload_nxv2f32_nxv2f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl1re32.v v10, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m1, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v10 -; CHECK-NEXT: ret - %y = load <vscale x 2 x float>, <vscale x 2 x float>* %x - %z = fpext <vscale x 2 x float> %y to <vscale x 2 x double> - ret <vscale x 2 x double> %z -} - -define void @truncstore_nxv4f32_nxv4f16(<vscale x 4 x float> %x, <vscale x 4 x half>* %z) { -; CHECK-LABEL: truncstore_nxv4f32_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vs1r.v v10, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 4 x float> %x to <vscale x 4 x half> - store <vscale x 4 x half> %y, <vscale x 4 x half>* %z - ret void -} - -define <vscale x 4 x double> @extload_nxv4f32_nxv4f64(<vscale x 4 x float>* %x) { -; CHECK-LABEL: extload_nxv4f32_nxv4f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl2re32.v v12, (a0) -; CHECK-NEXT: vsetvli a0, zero,
e32, m2, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v12 -; CHECK-NEXT: ret - %y = load <vscale x 4 x float>, <vscale x 4 x float>* %x - %z = fpext <vscale x 4 x float> %y to <vscale x 4 x double> - ret <vscale x 4 x double> %z -} - -define void @truncstore_nxv8f32_nxv8f16(<vscale x 8 x float> %x, <vscale x 8 x half>* %z) { -; CHECK-LABEL: truncstore_nxv8f32_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vs2r.v v12, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 8 x float> %x to <vscale x 8 x half> - store <vscale x 8 x half> %y, <vscale x 8 x half>* %z - ret void -} - -define <vscale x 8 x double> @extload_nxv8f32_nxv8f64(<vscale x 8 x float>* %x) { -; CHECK-LABEL: extload_nxv8f32_nxv8f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl4re32.v v16, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v16 -; CHECK-NEXT: ret - %y = load <vscale x 8 x float>, <vscale x 8 x float>* %x - %z = fpext <vscale x 8 x float> %y to <vscale x 8 x double> - ret <vscale x 8 x double> %z -} - -define void @truncstore_nxv16f32_nxv16f16(<vscale x 16 x float> %x, <vscale x 16 x half>* %z) { -; CHECK-LABEL: truncstore_nxv16f32_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e16, m4, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vs4r.v v16, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 16 x float> %x to <vscale x 16 x half> - store <vscale x 16 x half> %y, <vscale x 16 x half>* %z - ret void -} - -define <vscale x 16 x double> @extload_nxv16f32_nxv16f64(<vscale x 16 x float>* %x) { -; CHECK-LABEL: extload_nxv16f32_nxv16f64: -; CHECK: # %bb.0: -; CHECK-NEXT: vl8re32.v v24, (a0) -; CHECK-NEXT: vsetvli a0, zero, e32, m4, ta, ma -; CHECK-NEXT: vfwcvt.f.f.v v8, v24 -; CHECK-NEXT: vfwcvt.f.f.v v16, v28 -; CHECK-NEXT: ret - %y = load <vscale x 16 x float>, <vscale x 16 x float>* %x - %z = fpext <vscale x 16 x float> %y to <vscale x 16 x double> - ret <vscale x 16 x double> %z -} - -define void @truncstore_nxv2f64_nxv2f16(<vscale x 2 x double> %x, <vscale x 2 x half>* %z) { -; CHECK-LABEL: truncstore_nxv2f64_nxv2f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; CHECK-NEXT: vfncvt.rod.f.f.w v10, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, mf2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v8, v10 -; CHECK-NEXT: vse16.v v8, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x half> - store <vscale x 2 x half> %y, <vscale x 2 x half>* %z - ret void -} - -define void @truncstore_nxv2f64_nxv2f32(<vscale x 2 x double> %x, <vscale x 2 x float>* %z) { -; CHECK-LABEL: truncstore_nxv2f64_nxv2f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m1, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v10, v8 -; CHECK-NEXT: vs1r.v v10, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 2 x double> %x to <vscale x 2 x float> - store <vscale x 2 x float> %y, <vscale x 2 x float>* %z - ret void -} - -define void @truncstore_nxv4f64_nxv4f16(<vscale x 4 x double> %x, <vscale x 4 x half>* %z) { -; CHECK-LABEL: truncstore_nxv4f64_nxv4f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; CHECK-NEXT: vfncvt.rod.f.f.w v12, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m1, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v8, v12 -; CHECK-NEXT: vs1r.v v8, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x half> - store <vscale x 4 x half> %y, <vscale x 4 x half>* %z - ret void -} - -define void @truncstore_nxv4f64_nxv4f32(<vscale x 4 x double> %x, <vscale x 4 x float>* %z) { -; CHECK-LABEL: truncstore_nxv4f64_nxv4f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v12, v8 -; CHECK-NEXT: vs2r.v v12, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 4 x double> %x to <vscale x 4 x float> - store <vscale x 4 x float> %y, <vscale x 4 x float>* %z - ret void -} - -define void @truncstore_nxv8f64_nxv8f16(<vscale x 8 x double> %x, <vscale x 8 x half>* %z) { -; CHECK-LABEL: truncstore_nxv8f64_nxv8f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.rod.f.f.w v16, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v8, v16 -; CHECK-NEXT: vs2r.v v8, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x half> - store <vscale x 8 x half> %y, <vscale x 8 x half>* %z - ret void -} - -define void @truncstore_nxv8f64_nxv8f32(<vscale x 8 x double> %x, <vscale x 8 x float>* %z) { -; CHECK-LABEL: truncstore_nxv8f64_nxv8f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v16, v8 -; CHECK-NEXT: vs4r.v v16, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 8 x double> %x to <vscale x 8 x float> - store <vscale x 8 x float> %y, <vscale x 8 x float>* %z - ret void -} - -define void @truncstore_nxv16f64_nxv16f16(<vscale x 16 x double> %x, <vscale x 16 x half>* %z) { -; CHECK-LABEL:
truncstore_nxv16f64_nxv16f16: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.rod.f.f.w v24, v8 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v8, v24 -; CHECK-NEXT: vsetvli zero, zero, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.rod.f.f.w v12, v16 -; CHECK-NEXT: vsetvli zero, zero, e16, m2, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v10, v12 -; CHECK-NEXT: vs4r.v v8, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x half> - store <vscale x 16 x half> %y, <vscale x 16 x half>* %z - ret void -} - -define void @truncstore_nxv16f64_nxv16f32(<vscale x 16 x double> %x, <vscale x 16 x float>* %z) { -; CHECK-LABEL: truncstore_nxv16f64_nxv16f32: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, ma -; CHECK-NEXT: vfncvt.f.f.w v24, v8 -; CHECK-NEXT: vfncvt.f.f.w v28, v16 -; CHECK-NEXT: vs8r.v v24, (a0) -; CHECK-NEXT: ret - %y = fptrunc <vscale x 16 x double> %x to <vscale x 16 x float> - store <vscale x 16 x float> %y, <vscale x 16 x float>* %z - ret void -} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll index b017f5e..576159a 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-marith-vp.ll @@ -183,87 +183,3 @@ define <16 x i1> @xor_v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 zeroex %v = call <16 x i1> @llvm.vp.xor.v16i1(<16 x i1> %b, <16 x i1> %c, <16 x i1> %a, i32 %evl) ret <16 x i1> %v } - -declare <vscale x 1 x i1> @llvm.vp.xor.nxv1i1(<vscale x 1 x i1>, <vscale x 1 x i1>, <vscale x 1 x i1>, i32) - -define <vscale x 1 x i1> @xor_nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv1i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 1 x i1> @llvm.vp.xor.nxv1i1(<vscale x 1 x i1> %b, <vscale x 1 x i1> %c, <vscale x 1 x i1> %a, i32 %evl) - ret <vscale x 1 x i1> %v -} - -declare <vscale x 2 x i1> @llvm.vp.xor.nxv2i1(<vscale x 2 x i1>, <vscale x 2 x i1>, <vscale x 2 x i1>, i32) - -define <vscale x 2 x i1> @xor_nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv2i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 2 x i1> @llvm.vp.xor.nxv2i1(<vscale x 2 x i1> %b, <vscale x 2 x i1> %c, <vscale x 2 x i1> %a, i32 %evl) - ret <vscale x 2 x i1> %v -} - -declare <vscale x 4 x i1> @llvm.vp.xor.nxv4i1(<vscale x 4 x i1>, <vscale x 4 x i1>, <vscale x 4 x i1>, i32) - -define <vscale x 4 x i1> @xor_nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv4i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 4 x i1> @llvm.vp.xor.nxv4i1(<vscale x 4 x i1> %b, <vscale x 4 x i1> %c, <vscale x 4 x i1> %a, i32 %evl) - ret <vscale x 4 x i1> %v -} - -declare <vscale x 8 x i1> @llvm.vp.xor.nxv8i1(<vscale x 8 x i1>, <vscale x 8 x i1>, <vscale x 8 x i1>, i32) - -define <vscale x 8 x i1> @xor_nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv8i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 8 x i1> @llvm.vp.xor.nxv8i1(<vscale x 8 x i1> %b, <vscale x 8 x i1> %c, <vscale x 8 x i1> %a, i32 %evl) - ret <vscale x 8 x i1> %v -} - -declare <vscale x 16 x i1> @llvm.vp.xor.nxv16i1(<vscale x 16 x i1>, <vscale x 16 x i1>, <vscale x 16 x i1>, i32) - -define <vscale x 16 x i1> @xor_nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv16i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 16 x i1> @llvm.vp.xor.nxv16i1(<vscale x 16 x i1> %b, <vscale x 16 x i1> %c, <vscale x 16 x i1> %a, i32 %evl) - ret <vscale x 16 x i1> %v -} - -declare <vscale x 32 x i1> @llvm.vp.xor.nxv32i1(<vscale x 32 x i1>, <vscale x 32 x i1>, <vscale x 32 x i1>, i32) - -define <vscale x 32 x i1> @xor_nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv32i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 32 x i1> @llvm.vp.xor.nxv32i1(<vscale x 32 x i1> %b, <vscale x 32 x i1> %c, <vscale x 32 x i1> %a, i32 %evl) - ret <vscale x 32 x i1> %v -} - -declare <vscale x 64 x i1> @llvm.vp.xor.nxv64i1(<vscale x 64 x i1>, <vscale x 64 x i1>, <vscale x 64 x i1>, i32) - -define <vscale x 64 x i1> @xor_nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 zeroext %evl) { -; CHECK-LABEL: xor_nxv64i1: -; CHECK: # %bb.0: -; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma -; CHECK-NEXT: vmxor.mm v0, v0, v8 -; CHECK-NEXT: ret - %v = call <vscale x 64 x i1> @llvm.vp.xor.nxv64i1(<vscale x 64 x i1> %b, <vscale x 64 x i1> %c, <vscale x 64 x i1> %a, i32 %evl) - ret <vscale x 64 x i1> %v -} diff
--git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll index 1286f69..a0751ca 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-peephole-vmerge-vops.ll @@ -1,28 +1,28 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc < %s -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=256 | FileCheck %s -declare <8 x i16> @llvm.vp.merge.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.merge.nxv2f64(<8 x i1>, <8 x double>, <8 x double>, i32) +declare <8 x i16> @llvm.vp.merge.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) +declare <8 x i32> @llvm.vp.merge.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) +declare <8 x float> @llvm.vp.merge.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) +declare <8 x double> @llvm.vp.merge.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) ; Test binary operator with vp.merge and vp.smax. -declare <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) +declare <8 x i32> @llvm.vp.add.v8i32(<8 x i32>, <8 x i32>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test glued node of merge should not be deleted. 
-declare <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) +declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpadd2: ; CHECK: # %bb.0: @@ -31,11 +31,11 @@ define <8 x i32> @vpmerge_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -46,135 +46,136 @@ define <8 x i32> @vpmerge_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test float binary operator with vp.merge and vp.fadd. -declare <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float>, <8 x float>, <8 x i1>, i32) +declare <8 x float> @llvm.vp.fadd.v8f32(<8 x float>, <8 x float>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x float> %y, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfadd: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } ; Test conversion by fptosi. 
-declare <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float>, <8 x i1>, i32) +declare <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float>, <8 x i1>, i32) define <8 x i16> @vpmerge_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptosi: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, tu, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i16> @llvm.vp.merge.nxv2i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl) + %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i16> @llvm.vp.merge.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl) ret <8 x i16> %b } ; Test conversion by sitofp. -declare <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64>, <8 x i1>, i32) +declare <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x float> @vpmerge_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpsitofp: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } ; Test integer extension by vp.zext. -declare <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8>, <8 x i1>, i32) +declare <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8>, <8 x i1>, i32) define <8 x i32> @vpmerge_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpzext: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test integer truncation by vp.trunc. 
-declare <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64>, <8 x i1>, i32) +declare <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64>, <8 x i1>, i32) define <8 x i32> @vpmerge_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vptrunc: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test integer extension by vp.fpext. -declare <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float>, <8 x i1>, i32) +declare <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float>, <8 x i1>, i32) define <8 x double> @vpmerge_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfpext: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x double> @llvm.vp.merge.nxv2f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl) + %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x double> @llvm.vp.merge.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl) ret <8 x double> %b } ; Test integer truncation by vp.trunc. -declare <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double>, <8 x i1>, i32) +declare <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double>, <8 x i1>, i32) define <8 x float> @vpmerge_vpfptrunc(<8 x float> %passthru, <8 x double> %x, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpfptrunc: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a0, e32, m1, tu, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.merge.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.merge.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } ; Test load operation by vp.load. 
-declare <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> *, <8 x i1>, i32) -define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) { +declare <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>*, <8 x i1>, i32) + +define <8 x i32> @vpmerge_vpload(<8 x i32> %passthru, <8 x i32>* %p, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test result have chain and glued node. -define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { +define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32>* %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpmerge_vpload2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, ma @@ -182,18 +183,18 @@ define <8 x i32> @vpmerge_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> ; CHECK-NEXT: vsetvli zero, zero, e32, m1, tu, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl) - %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.merge.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl) + %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.merge.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } -declare <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) -declare <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) -declare <8 x float> @llvm.vp.select.nxv2f32(<8 x i1>, <8 x float>, <8 x float>, i32) -declare <8 x double> @llvm.vp.select.nxv2f64(<8 x i1>, <8 x double>, <8 x double>, i32) +declare <8 x i16> @llvm.vp.select.v8i16(<8 x i1>, <8 x i16>, <8 x i16>, i32) +declare <8 x i32> @llvm.vp.select.v8i32(<8 x i1>, <8 x i32>, <8 x i32>, i32) +declare <8 x float> @llvm.vp.select.v8f32(<8 x i1>, <8 x float>, <8 x float>, i32) +declare <8 x double> @llvm.vp.select.v8f64(<8 x i1>, <8 x double>, <8 x double>, i32) ; Test binary operator with vp.select and vp.add. 
define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y, <8 x i1> %m, i32 zeroext %vl) { @@ -202,10 +203,10 @@ define <8 x i32> @vpselect_vpadd(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> %y ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -217,11 +218,11 @@ define <8 x i32> @vpselect_vpadd2(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> % ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -232,10 +233,10 @@ define <8 x i32> @vpselect_vpadd3(<8 x i32> %passthru, <8 x i32> %x, <8 x i32> % ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma ; CHECK-NEXT: vadd.vv v8, v9, v10 ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.add.nxv2i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.add.v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %mask, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -246,10 +247,10 @@ define <8 x float> @vpselect_vpfadd(<8 x float> %passthru, <8 x float> %x, <8 x ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfadd.vv v8, v9, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.fadd.nxv2f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.fadd.v8f32(<8 x float> %x, <8 x float> %y, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x 
float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } @@ -260,10 +261,10 @@ define <8 x i16> @vpselect_vpfptosi(<8 x i16> %passthru, <8 x float> %x, <8 x i1 ; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, mu ; CHECK-NEXT: vfncvt.rtz.x.f.w v8, v9, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i16> @llvm.vp.fptosi.nxv2i16.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i16> @llvm.vp.select.nxv2i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl) + %a = call <8 x i16> @llvm.vp.fptosi.v8i16.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i16> @llvm.vp.select.v8i16(<8 x i1> %m, <8 x i16> %a, <8 x i16> %passthru, i32 %vl) ret <8 x i16> %b } @@ -274,10 +275,10 @@ define <8 x float> @vpselect_vpsitofp(<8 x float> %passthru, <8 x i64> %x, <8 x ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.x.w v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.sitofp.nxv2f32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.sitofp.v8f32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } @@ -288,10 +289,10 @@ define <8 x i32> @vpselect_vpzext(<8 x i32> %passthru, <8 x i8> %x, <8 x i1> %m, ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vzext.vf4 v8, v9, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.zext.nxv2i32.nxv2i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.zext.v8i32.v8i8(<8 x i8> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -302,10 +303,10 @@ define <8 x i32> @vpselect_vptrunc(<8 x i32> %passthru, <8 x i64> %x, <8 x i1> % ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vnsrl.wi v8, v10, 0, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.trunc.v8i32.v8i64(<8 x i64> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } @@ -316,10 +317,10 @@ define <8 x double> @vpselect_vpfpext(<8 x double> %passthru, <8 x float> %x, <8 ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfwcvt.f.f.v v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> 
poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x double> @llvm.vp.fpext.nxv2f64.nxv2f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x double> @llvm.vp.select.nxv2f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl) + %a = call <8 x double> @llvm.vp.fpext.v8f64.v8f32(<8 x float> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x double> @llvm.vp.select.v8f64(<8 x i1> %m, <8 x double> %a, <8 x double> %passthru, i32 %vl) ret <8 x double> %b } @@ -330,39 +331,39 @@ define <8 x float> @vpselect_vpfptrunc(<8 x float> %passthru, <8 x double> %x, < ; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, mu ; CHECK-NEXT: vfncvt.f.f.w v8, v10, v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x float> @llvm.vp.fptrunc.nxv2f32.nxv2f64(<8 x double> %x, <8 x i1> %mask, i32 %vl) - %b = call <8 x float> @llvm.vp.select.nxv2f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) + %a = call <8 x float> @llvm.vp.fptrunc.v8f32.v8f64(<8 x double> %x, <8 x i1> %mask, i32 %vl) + %b = call <8 x float> @llvm.vp.select.v8f32(<8 x i1> %m, <8 x float> %a, <8 x float> %passthru, i32 %vl) ret <8 x float> %b } ; Test load operation by vp.load. -define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, <8 x i32> * %p, <8 x i1> %m, i32 zeroext %vl) { +define <8 x i32> @vpselect_vpload(<8 x i32> %passthru, <8 x i32>* %p, <8 x i1> %m, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpload: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } ; Test result have chain and glued node. 
-define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, <8 x i32> * %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { +define <8 x i32> @vpselect_vpload2(<8 x i32> %passthru, <8 x i32>* %p, <8 x i32> %x, <8 x i32> %y, i32 zeroext %vl) { ; CHECK-LABEL: vpselect_vpload2: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu ; CHECK-NEXT: vmseq.vv v0, v9, v10 ; CHECK-NEXT: vle32.v v8, (a0), v0.t ; CHECK-NEXT: ret - %splat = insertelement <8 x i1> poison, i1 -1, i32 0 + %splat = insertelement <8 x i1> poison, i1 true, i32 0 %mask = shufflevector <8 x i1> %splat, <8 x i1> poison, <8 x i32> zeroinitializer - %a = call <8 x i32> @llvm.vp.load.nxv2i32.p0nxv2i32(<8 x i32> * %p, <8 x i1> %mask, i32 %vl) - %m = call <8 x i1> @llvm.vp.icmp.nxv2i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) - %b = call <8 x i32> @llvm.vp.select.nxv2i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) + %a = call <8 x i32> @llvm.vp.load.v8i32.p0v8i32(<8 x i32>* %p, <8 x i1> %mask, i32 %vl) + %m = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %x, <8 x i32> %y, metadata !"eq", <8 x i1> %mask, i32 %vl) + %b = call <8 x i32> @llvm.vp.select.v8i32(<8 x i1> %m, <8 x i32> %a, <8 x i32> %passthru, i32 %vl) ret <8 x i32> %b } diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll index 7730ecf..49caec5 100644 --- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-reduction-mask-vp.ll @@ -401,10 +401,10 @@ define signext i1 @vpreduce_add_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32) +declare i1 @llvm.vp.reduce.smax.v1i1(i1, <1 x i1>, <1 x i1>, i32) -define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpreduce_smax_nxv1i1: +define signext i1 @vpreduce_smax_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpreduce_smax_v1i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 @@ -414,14 +414,14 @@ define signext i1 @vpreduce_smax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: ret - %r = call i1 @llvm.vp.reduce.smax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl) + %r = call i1 @llvm.vp.reduce.smax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl) ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32) +declare i1 @llvm.vp.reduce.smax.v2i1(i1, <2 x i1>, <2 x i1>, i32) -define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { -; CHECK-LABEL: vpreduce_smax_nxv2i1: +define signext i1 @vpreduce_smax_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vpreduce_smax_v2i1: ; CHECK: # %bb.0: ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma ; CHECK-NEXT: vmnot.m v9, v0 @@ -431,14 +431,14 @@ define signext i1 @vpreduce_smax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, ; CHECK-NEXT: and a0, a1, a0 ; CHECK-NEXT: neg a0, a0 ; CHECK-NEXT: ret - %r = call i1 @llvm.vp.reduce.smax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl) + %r = call i1 @llvm.vp.reduce.smax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl) ret i1 %r } -declare i1 @llvm.vp.reduce.smax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32) +declare i1 @llvm.vp.reduce.smax.v4i1(i1, <4 x i1>, <4 x i1>, i32) -define 
signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv4i1:
+define signext i1 @vpreduce_smax_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -448,14 +448,14 @@ define signext i1 @vpreduce_smax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv8i1:
+define signext i1 @vpreduce_smax_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -465,14 +465,14 @@ define signext i1 @vpreduce_smax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv16i1:
+define signext i1 @vpreduce_smax_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -482,14 +482,14 @@ define signext i1 @vpreduce_smax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv32i1:
+define signext i1 @vpreduce_smax_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -499,14 +499,14 @@ define signext i1 @vpreduce_smax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.smax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <64 x
i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smax_nxv64i1:
+define signext i1 @vpreduce_smax_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smax_v64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -516,14 +516,14 @@ define signext i1 @vpreduce_smax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv1i1:
+define signext i1 @vpreduce_smin_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -534,14 +534,14 @@ define signext i1 @vpreduce_smin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv2i1:
+define signext i1 @vpreduce_smin_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -552,14 +552,14 @@ define signext i1 @vpreduce_smin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv4i1:
+define signext i1 @vpreduce_smin_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -570,14 +570,14 @@ define signext i1 @vpreduce_smin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv8i1:
+define
signext i1 @vpreduce_smin_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -588,14 +588,14 @@ define signext i1 @vpreduce_smin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv16i1:
+define signext i1 @vpreduce_smin_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -606,14 +606,14 @@ define signext i1 @vpreduce_smin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv32i1:
+define signext i1 @vpreduce_smin_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -624,14 +624,14 @@ define signext i1 @vpreduce_smin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.smin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.smin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_smin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_smin_nxv64i1:
+define signext i1 @vpreduce_smin_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_smin_v64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -642,14 +642,14 @@ define signext i1 @vpreduce_smin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.smin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.smin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv1i1:
+define signext i1 @vpreduce_umax_v1i1(i1 signext
%s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -660,14 +660,14 @@ define signext i1 @vpreduce_umax_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv2i1:
+define signext i1 @vpreduce_umax_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
@@ -678,14 +678,14 @@ define signext i1 @vpreduce_umax_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv4i1:
+define signext i1 @vpreduce_umax_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
@@ -696,14 +696,14 @@ define signext i1 @vpreduce_umax_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv8i1:
+define signext i1 @vpreduce_umax_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
@@ -714,14 +714,14 @@ define signext i1 @vpreduce_umax_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m,
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv16i1:
+define signext i1 @vpreduce_umax_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL:
vpreduce_umax_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
@@ -732,14 +732,14 @@ define signext i1 @vpreduce_umax_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv32i1:
+define signext i1 @vpreduce_umax_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
@@ -750,14 +750,14 @@ define signext i1 @vpreduce_umax_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umax.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.umax.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_umax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umax_nxv64i1:
+define signext i1 @vpreduce_umax_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umax_v64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vmv1r.v v9, v0
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
@@ -768,14 +768,14 @@ define signext i1 @vpreduce_umax_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1>
 ; CHECK-NEXT: andi a0, a0, 1
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umax.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umax.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv1i1(i1, <1 x i1>, <1 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v1i1(i1, <1 x i1>, <1 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv1i1:
+define signext i1 @vpreduce_umin_v1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v1i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -785,14 +785,14 @@ define signext i1 @vpreduce_umin_nxv1i1(i1 signext %s, <1 x i1> %v, <1 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v1i1(i1 %s, <1 x i1> %v, <1 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv2i1(i1, <2 x i1>, <2 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v2i1(i1, <2 x i1>, <2 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv2i1:
+define signext i1 @vpreduce_umin_v2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v2i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT:
vsetvli zero, a1, e8, mf8, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -802,14 +802,14 @@ define signext i1 @vpreduce_umin_nxv2i1(i1 signext %s, <2 x i1> %v, <2 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v2i1(i1 %s, <2 x i1> %v, <2 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv4i1(i1, <4 x i1>, <4 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v4i1(i1, <4 x i1>, <4 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv4i1:
+define signext i1 @vpreduce_umin_v4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v4i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -819,14 +819,14 @@ define signext i1 @vpreduce_umin_nxv4i1(i1 signext %s, <4 x i1> %v, <4 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v4i1(i1 %s, <4 x i1> %v, <4 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv8i1(i1, <8 x i1>, <8 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v8i1(i1, <8 x i1>, <8 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv8i1:
+define signext i1 @vpreduce_umin_v8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v8i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -836,14 +836,14 @@ define signext i1 @vpreduce_umin_nxv8i1(i1 signext %s, <8 x i1> %v, <8 x i1> %m,
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v8i1(i1 %s, <8 x i1> %v, <8 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv16i1(i1, <16 x i1>, <16 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v16i1(i1, <16 x i1>, <16 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv16i1:
+define signext i1 @vpreduce_umin_v16i1(i1 signext %s, <16 x i1> %v, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v16i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -853,14 +853,14 @@ define signext i1 @vpreduce_umin_nxv16i1(i1 signext %s, <16 x i1> %v, <16 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v16i1(i1 %s, <16 x i1> %v, <16 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv32i1(i1, <32 x i1>, <32 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v32i1(i1, <32 x i1>, <32 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv32i1:
+define signext i1 @vpreduce_umin_v32i1(i1 signext %s, <32 x i1> %v, <32 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v32i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m2, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@
-870,14 +870,14 @@ define signext i1 @vpreduce_umin_nxv32i1(i1 signext %s, <32 x i1> %v, <32 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v32i1(i1 %s, <32 x i1> %v, <32 x i1> %m, i32 %evl)
 ret i1 %r
 }
-declare i1 @llvm.vp.reduce.umin.nxv64i1(i1, <64 x i1>, <64 x i1>, i32)
+declare i1 @llvm.vp.reduce.umin.v64i1(i1, <64 x i1>, <64 x i1>, i32)
-define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
-; CHECK-LABEL: vpreduce_umin_nxv64i1:
+define signext i1 @vpreduce_umin_v64i1(i1 signext %s, <64 x i1> %v, <64 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: vpreduce_umin_v64i1:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetvli zero, a1, e8, m4, ta, ma
 ; CHECK-NEXT: vmnot.m v9, v0
@@ -887,7 +887,7 @@ define signext i1 @vpreduce_umin_nxv64i1(i1 signext %s, <64 x i1> %v, <64 x i1>
 ; CHECK-NEXT: and a0, a1, a0
 ; CHECK-NEXT: neg a0, a0
 ; CHECK-NEXT: ret
- %r = call i1 @llvm.vp.reduce.umin.nxv64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
+ %r = call i1 @llvm.vp.reduce.umin.v64i1(i1 %s, <64 x i1> %v, <64 x i1> %m, i32 %evl)
 ret i1 %r
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll
index 9263fa7..cc9a8f0 100644
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-vfwmacc.ll
@@ -6,8 +6,8 @@
 declare <1 x float> @llvm.fma.v1f32(<1 x float>, <1 x float>, <1 x float>)
-define <1 x float> @vfwmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv1f32:
+define <1 x float> @vfwmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v9, v10
@@ -18,8 +18,8 @@ define <1 x float> @vfwmacc_vv_nxv1f32(<1 x float> %va, <1 x hal
 ret <1 x float> %vf
 }
-define <1 x float> @vfwmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv1f32:
+define <1 x float> @vfwmacc_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
@@ -32,8 +32,8 @@ define <1 x float> @vfwmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c)
 ret <1 x float> %vf
 }
-define <1 x float> @vfwnmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv1f32:
+define <1 x float> @vfwnmacc_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
@@ -46,8 +46,8 @@ define <1 x float> @vfwnmacc_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x ha
 ret <1 x float> %vh
 }
-define <1 x float> @vfwnmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv1f32:
+define <1 x float> @vfwnmacc_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -62,8 +62,8 @@ define <1 x float> @vfwnmacc_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c
 ret <1 x float> %vh
 }
-define <1 x float> @vfwnmacc_fv_nxv1f32(<1 x float> %va, <1 x half> %vb,
half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv1f32:
+define <1 x float> @vfwnmacc_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -78,8 +78,8 @@ define <1 x float> @vfwnmacc_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c
 ret <1 x float> %vh
 }
-define <1 x float> @vfwmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv1f32:
+define <1 x float> @vfwmsac_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v9, v10
@@ -91,8 +91,8 @@ define <1 x float> @vfwmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x hal
 ret <1 x float> %vg
 }
-define <1 x float> @vfwmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv1f32:
+define <1 x float> @vfwmsac_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
@@ -106,8 +106,8 @@ define <1 x float> @vfwmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c)
 ret <1 x float> %vg
 }
-define <1 x float> @vfwnmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv1f32:
+define <1 x float> @vfwnmsac_vv_v1f32(<1 x float> %va, <1 x half> %vb, <1 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
@@ -119,8 +119,8 @@ define <1 x float> @vfwnmsac_vv_nxv1f32(<1 x float> %va, <1 x half> %vb, <1 x ha
 ret <1 x float> %vg
 }
-define <1 x float> @vfwnmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv1f32:
+define <1 x float> @vfwnmsac_vf_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -134,8 +134,8 @@ define <1 x float> @vfwnmsac_vf_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c
 ret <1 x float> %vg
 }
-define <1 x float> @vfwnmsac_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv1f32:
+define <1 x float> @vfwnmsac_fv_v1f32(<1 x float> %va, <1 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v1f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -151,8 +151,8 @@ define <1 x float> @vfwnmsac_fv_nxv1f32(<1 x float> %va, <1 x half> %vb, half %c
 declare <2 x float> @llvm.fma.v2f32(<2 x float>, <2 x float>, <2 x float>)
-define <2 x float> @vfwmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv2f32:
+define <2 x float> @vfwmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v9, v10
@@ -163,8 +163,8 @@ define <2 x float> @vfwmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x hal
 ret <2 x float> %vf
 }
-define <2 x float> @vfwmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv2f32:
+define <2 x float> @vfwmacc_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli
zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
@@ -177,8 +177,8 @@ define <2 x float> @vfwmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c)
 ret <2 x float> %vf
 }
-define <2 x float> @vfwnmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv2f32:
+define <2 x float> @vfwnmacc_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
@@ -191,8 +191,8 @@ define <2 x float> @vfwnmacc_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x ha
 ret <2 x float> %vh
 }
-define <2 x float> @vfwnmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv2f32:
+define <2 x float> @vfwnmacc_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -207,8 +207,8 @@ define <2 x float> @vfwnmacc_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c
 ret <2 x float> %vh
 }
-define <2 x float> @vfwnmacc_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv2f32:
+define <2 x float> @vfwnmacc_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -223,8 +223,8 @@ define <2 x float> @vfwnmacc_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c
 ret <2 x float> %vh
 }
-define <2 x float> @vfwmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv2f32:
+define <2 x float> @vfwmsac_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v9, v10
@@ -236,8 +236,8 @@ define <2 x float> @vfwmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x hal
 ret <2 x float> %vg
 }
-define <2 x float> @vfwmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv2f32:
+define <2 x float> @vfwmsac_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
@@ -251,8 +251,8 @@ define <2 x float> @vfwmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c)
 ret <2 x float> %vg
 }
-define <2 x float> @vfwnmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv2f32:
+define <2 x float> @vfwnmsac_vv_v2f32(<2 x float> %va, <2 x half> %vb, <2 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
@@ -264,8 +264,8 @@ define <2 x float> @vfwnmsac_vv_nxv2f32(<2 x float> %va, <2 x half> %vb, <2 x ha
 ret <2 x float> %vg
 }
-define <2 x float> @vfwnmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv2f32:
+define <2 x float> @vfwnmsac_vf_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -279,8 +279,8 @@ define <2 x float> @vfwnmsac_vf_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c
 ret <2 x float> %vg
 }
-define <2 x float> @vfwnmsac_fv_nxv2f32(<2
x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv2f32:
+define <2 x float> @vfwnmsac_fv_v2f32(<2 x float> %va, <2 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v2f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e16, mf4, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -297,8 +297,8 @@ define <2 x float> @vfwnmsac_fv_nxv2f32(<2 x float> %va, <2 x half> %vb, half %c
 declare <4 x float> @llvm.fma.v4f32(<4 x float>, <4 x float>, <4 x float>)
-define <4 x float> @vfwmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv4f32:
+define <4 x float> @vfwmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v9, v10
@@ -309,8 +309,8 @@ define <4 x float> @vfwmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x hal
 ret <4 x float> %vf
 }
-define <4 x float> @vfwmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv4f32:
+define <4 x float> @vfwmacc_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
@@ -323,8 +323,8 @@ define <4 x float> @vfwmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c)
 ret <4 x float> %vf
 }
-define <4 x float> @vfwnmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv4f32:
+define <4 x float> @vfwnmacc_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
@@ -337,8 +337,8 @@ define <4 x float> @vfwnmacc_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x ha
 ret <4 x float> %vh
 }
-define <4 x float> @vfwnmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv4f32:
+define <4 x float> @vfwnmacc_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -353,8 +353,8 @@ define <4 x float> @vfwnmacc_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c
 ret <4 x float> %vh
 }
-define <4 x float> @vfwnmacc_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv4f32:
+define <4 x float> @vfwnmacc_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -369,8 +369,8 @@ define <4 x float> @vfwnmacc_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c
 ret <4 x float> %vh
 }
-define <4 x float> @vfwmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv4f32:
+define <4 x float> @vfwmsac_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v9, v10
@@ -382,8 +382,8 @@ define <4 x float> @vfwmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x hal
 ret <4 x float> %vg
 }
-define <4 x float> @vfwmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv4f32:
+define <4 x float> @vfwmsac_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v4f32:
 ; CHECK: # %bb.0:
 ;
CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
@@ -397,8 +397,8 @@ define <4 x float> @vfwmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c)
 ret <4 x float> %vg
 }
-define <4 x float> @vfwnmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv4f32:
+define <4 x float> @vfwnmsac_vv_v4f32(<4 x float> %va, <4 x half> %vb, <4 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
@@ -410,8 +410,8 @@ define <4 x float> @vfwnmsac_vv_nxv4f32(<4 x float> %va, <4 x half> %vb, <4 x ha
 ret <4 x float> %vg
 }
-define <4 x float> @vfwnmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv4f32:
+define <4 x float> @vfwnmsac_vf_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -425,8 +425,8 @@ define <4 x float> @vfwnmsac_vf_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c
 ret <4 x float> %vg
 }
-define <4 x float> @vfwnmsac_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv4f32:
+define <4 x float> @vfwnmsac_fv_v4f32(<4 x float> %va, <4 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v4f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e16, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -442,8 +442,8 @@ define <4 x float> @vfwnmsac_fv_nxv4f32(<4 x float> %va, <4 x half> %vb, half %c
 declare <8 x float> @llvm.fma.v8f32(<8 x float>, <8 x float>, <8 x float>)
-define <8 x float> @vfwmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv8f32:
+define <8 x float> @vfwmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v10, v11
@@ -454,8 +454,8 @@ define <8 x float> @vfwmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x hal
 ret <8 x float> %vf
 }
-define <8 x float> @vfwmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv8f32:
+define <8 x float> @vfwmacc_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10
@@ -468,8 +468,8 @@ define <8 x float> @vfwmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c)
 ret <8 x float> %vf
 }
-define <8 x float> @vfwnmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv8f32:
+define <8 x float> @vfwnmacc_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
@@ -482,8 +482,8 @@ define <8 x float> @vfwnmacc_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x ha
 ret <8 x float> %vh
 }
-define <8 x float> @vfwnmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv8f32:
+define <8 x float> @vfwnmacc_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
@@ -498,8 +498,8 @@ define <8 x float> @vfwnmacc_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c
 ret <8 x
float> %vh
 }
-define <8 x float> @vfwnmacc_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv8f32:
+define <8 x float> @vfwnmacc_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
@@ -514,8 +514,8 @@ define <8 x float> @vfwnmacc_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c
 ret <8 x float> %vh
 }
-define <8 x float> @vfwmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv8f32:
+define <8 x float> @vfwmsac_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v10, v11
@@ -527,8 +527,8 @@ define <8 x float> @vfwmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x hal
 ret <8 x float> %vg
 }
-define <8 x float> @vfwmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv8f32:
+define <8 x float> @vfwmsac_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
@@ -542,8 +542,8 @@ define <8 x float> @vfwmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c)
 ret <8 x float> %vg
 }
-define <8 x float> @vfwnmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv8f32:
+define <8 x float> @vfwnmsac_vv_v8f32(<8 x float> %va, <8 x half> %vb, <8 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
@@ -555,8 +555,8 @@ define <8 x float> @vfwnmsac_vv_nxv8f32(<8 x float> %va, <8 x half> %vb, <8 x ha
 ret <8 x float> %vg
 }
-define <8 x float> @vfwnmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv8f32:
+define <8 x float> @vfwnmsac_vf_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
@@ -570,8 +570,8 @@ define <8 x float> @vfwnmsac_vf_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c
 ret <8 x float> %vg
 }
-define <8 x float> @vfwnmsac_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv8f32:
+define <8 x float> @vfwnmsac_fv_v8f32(<8 x float> %va, <8 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v8f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 8, e16, m1, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v10
@@ -587,8 +587,8 @@ define <8 x float> @vfwnmsac_fv_nxv8f32(<8 x float> %va, <8 x half> %vb, half %c
 declare <16 x float> @llvm.fma.v16f32(<16 x float>, <16 x float>, <16 x float>)
-define <16 x float> @vfwmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv16f32:
+define <16 x float> @vfwmacc_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v12, v14
@@ -599,8 +599,8 @@ define <16 x float> @vfwmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16
 ret <16 x float> %vf
 }
-define <16 x float> @vfwmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv16f32:
+define <16 x float>
@vfwmacc_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmacc_vf_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v12
@@ -613,8 +613,8 @@ define <16 x float> @vfwmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half
 ret <16 x float> %vf
 }
-define <16 x float> @vfwnmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv16f32:
+define <16 x float> @vfwnmacc_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v12, v14
@@ -627,8 +627,8 @@ define <16 x float> @vfwnmacc_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16
 ret <16 x float> %vh
 }
-define <16 x float> @vfwnmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv16f32:
+define <16 x float> @vfwnmacc_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_vf_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
@@ -643,8 +643,8 @@ define <16 x float> @vfwnmacc_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, hal
 ret <16 x float> %vh
 }
-define <16 x float> @vfwnmacc_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv16f32:
+define <16 x float> @vfwnmacc_fv_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmacc_fv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v12
@@ -659,8 +659,8 @@ define <16 x float> @vfwnmacc_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, hal
 ret <16 x float> %vh
 }
-define <16 x float> @vfwmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv16f32:
+define <16 x float> @vfwmsac_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v12, v14
@@ -672,8 +672,8 @@ define <16 x float> @vfwmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16
 ret <16 x float> %vg
 }
-define <16 x float> @vfwmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv16f32:
+define <16 x float> @vfwmsac_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwmsac_vf_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v12
@@ -687,8 +687,8 @@ define <16 x float> @vfwmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half
 ret <16 x float> %vg
 }
-define <16 x float> @vfwnmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv16f32:
+define <16 x float> @vfwnmsac_vv_v16f32(<16 x float> %va, <16 x half> %vb, <16 x half> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v12, v14
@@ -700,8 +700,8 @@ define <16 x float> @vfwnmsac_vv_nxv16f32(<16 x float> %va, <16 x half> %vb, <16
 ret <16 x float> %vg
 }
-define <16 x float> @vfwnmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv16f32:
+define <16 x float> @vfwnmsac_vf_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_vf_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16,
m2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
@@ -715,8 +715,8 @@ define <16 x float> @vfwnmsac_vf_nxv16f32(<16 x float> %va, <16 x half> %vb, hal
 ret <16 x float> %vg
 }
-define <16 x float> @vfwnmsac_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, half %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv16f32:
+define <16 x float> @vfwnmsac_fv_v16f32(<16 x float> %va, <16 x half> %vb, half %c) {
+; CHECK-LABEL: vfwnmsac_fv_v16f32:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 16, e16, m2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v12
@@ -732,8 +732,8 @@ define <16 x float> @vfwnmsac_fv_nxv16f32(<16 x float> %va, <16 x half> %vb, hal
 declare <1 x double> @llvm.fma.v1f64(<1 x double>, <1 x double>, <1 x double>)
-define <1 x double> @vfwmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv1f64:
+define <1 x double> @vfwmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v9, v10
@@ -744,8 +744,8 @@ define <1 x double> @vfwmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x
 ret <1 x double> %vf
 }
-define <1 x double> @vfwmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv1f64:
+define <1 x double> @vfwmacc_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
@@ -758,8 +758,8 @@ define <1 x double> @vfwmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float
 ret <1 x double> %vf
 }
-define <1 x double> @vfwnmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv1f64:
+define <1 x double> @vfwnmacc_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
@@ -772,8 +772,8 @@ define <1 x double> @vfwnmacc_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x
 ret <1 x double> %vh
 }
-define <1 x double> @vfwnmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv1f64:
+define <1 x double> @vfwnmacc_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -788,8 +788,8 @@ define <1 x double> @vfwnmacc_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, floa
 ret <1 x double> %vh
 }
-define <1 x double> @vfwnmacc_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv1f64:
+define <1 x double> @vfwnmacc_fv_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -804,8 +804,8 @@ define <1 x double> @vfwnmacc_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, floa
 ret <1 x double> %vh
 }
-define <1 x double> @vfwmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv1f64:
+define <1 x double> @vfwmsac_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v9, v10
@@ -817,8 +817,8 @@ define <1 x double> @vfwmsac_vv_nxv1f64(<1 x double> %va, <1
x float> %vb, <1 x
 ret <1 x double> %vg
 }
-define <1 x double> @vfwmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv1f64:
+define <1 x double> @vfwmsac_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
@@ -832,8 +832,8 @@ define <1 x double> @vfwmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float
 ret <1 x double> %vg
 }
-define <1 x double> @vfwnmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv1f64:
+define <1 x double> @vfwnmsac_vv_v1f64(<1 x double> %va, <1 x float> %vb, <1 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
@@ -845,8 +845,8 @@ define <1 x double> @vfwnmsac_vv_nxv1f64(<1 x double> %va, <1 x float> %vb, <1 x
 ret <1 x double> %vg
 }
-define <1 x double> @vfwnmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv1f64:
+define <1 x double> @vfwnmsac_vf_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -860,8 +860,8 @@ define <1 x double> @vfwnmsac_vf_nxv1f64(<1 x double> %va, <1 x float> %vb, floa
 ret <1 x double> %vg
 }
-define <1 x double> @vfwnmsac_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv1f64:
+define <1 x double> @vfwnmsac_fv_v1f64(<1 x double> %va, <1 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v1f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 1, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -877,8 +877,8 @@ define <1 x double> @vfwnmsac_fv_nxv1f64(<1 x double> %va, <1 x float> %vb, floa
 declare <2 x double> @llvm.fma.v2f64(<2 x double>, <2 x double>, <2 x double>)
-define <2 x double> @vfwmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv2f64:
+define <2 x double> @vfwmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v9, v10
@@ -889,8 +889,8 @@ define <2 x double> @vfwmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x
 ret <2 x double> %vf
 }
-define <2 x double> @vfwmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv2f64:
+define <2 x double> @vfwmacc_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v9
@@ -903,8 +903,8 @@ define <2 x double> @vfwmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float
 ret <2 x double> %vf
 }
-define <2 x double> @vfwnmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv2f64:
+define <2 x double> @vfwnmacc_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v9, v10
@@ -917,8 +917,8 @@ define <2 x double> @vfwnmacc_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x
 ret <2 x double> %vh
 }
-define <2 x double> @vfwnmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-;
CHECK-LABEL: vfwnmacc_vf_nxv2f64:
+define <2 x double> @vfwnmacc_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -933,8 +933,8 @@ define <2 x double> @vfwnmacc_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, floa
 ret <2 x double> %vh
 }
-define <2 x double> @vfwnmacc_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv2f64:
+define <2 x double> @vfwnmacc_fv_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v9
@@ -949,8 +949,8 @@ define <2 x double> @vfwnmacc_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, floa
 ret <2 x double> %vh
 }
-define <2 x double> @vfwmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv2f64:
+define <2 x double> @vfwmsac_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v9, v10
@@ -962,8 +962,8 @@ define <2 x double> @vfwmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x
 ret <2 x double> %vg
 }
-define <2 x double> @vfwmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv2f64:
+define <2 x double> @vfwmsac_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v9
@@ -977,8 +977,8 @@ define <2 x double> @vfwmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float
 ret <2 x double> %vg
 }
-define <2 x double> @vfwnmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv2f64:
+define <2 x double> @vfwnmsac_vv_v2f64(<2 x double> %va, <2 x float> %vb, <2 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v9, v10
@@ -990,8 +990,8 @@ define <2 x double> @vfwnmsac_vv_nxv2f64(<2 x double> %va, <2 x float> %vb, <2 x
 ret <2 x double> %vg
 }
-define <2 x double> @vfwnmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv2f64:
+define <2 x double> @vfwnmsac_vf_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -1005,8 +1005,8 @@ define <2 x double> @vfwnmsac_vf_nxv2f64(<2 x double> %va, <2 x float> %vb, floa
 ret <2 x double> %vg
 }
-define <2 x double> @vfwnmsac_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv2f64:
+define <2 x double> @vfwnmsac_fv_v2f64(<2 x double> %va, <2 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v2f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 2, e32, mf2, ta, ma
 ; CHECK-NEXT: vfwnmsac.vf v8, fa0, v9
@@ -1023,8 +1023,8 @@ define <2 x double> @vfwnmsac_fv_nxv2f64(<2 x double> %va, <2 x float> %vb, floa
 declare <4 x double> @llvm.fma.v4f64(<4 x double>, <4 x double>, <4 x double>)
-define <4 x double> @vfwmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv4f64:
+define <4 x double> @vfwmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+;
CHECK-LABEL: vfwmacc_vv_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwmacc.vv v8, v10, v11
@@ -1035,8 +1035,8 @@ define <4 x double> @vfwmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x
 ret <4 x double> %vf
 }
-define <4 x double> @vfwmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv4f64:
+define <4 x double> @vfwmacc_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwmacc.vf v8, fa0, v10
@@ -1049,8 +1049,8 @@ define <4 x double> @vfwmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float
 ret <4 x double> %vf
 }
-define <4 x double> @vfwnmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv4f64:
+define <4 x double> @vfwnmacc_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vv v8, v10, v11
@@ -1063,8 +1063,8 @@ define <4 x double> @vfwnmacc_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x
 ret <4 x double> %vh
 }
-define <4 x double> @vfwnmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv4f64:
+define <4 x double> @vfwnmacc_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
@@ -1079,8 +1079,8 @@ define <4 x double> @vfwnmacc_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, floa
 ret <4 x double> %vh
 }
-define <4 x double> @vfwnmacc_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv4f64:
+define <4 x double> @vfwnmacc_fv_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwnmacc.vf v8, fa0, v10
@@ -1095,8 +1095,8 @@ define <4 x double> @vfwnmacc_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, floa
 ret <4 x double> %vh
 }
-define <4 x double> @vfwmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv4f64:
+define <4 x double> @vfwmsac_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwmsac.vv v8, v10, v11
@@ -1108,8 +1108,8 @@ define <4 x double> @vfwmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x
 ret <4 x double> %vg
 }
-define <4 x double> @vfwmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv4f64:
+define <4 x double> @vfwmsac_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwmsac.vf v8, fa0, v10
@@ -1123,8 +1123,8 @@ define <4 x double> @vfwmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float
 ret <4 x double> %vg
 }
-define <4 x double> @vfwnmsac_vv_nxv4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv4f64:
+define <4 x double> @vfwnmsac_vv_v4f64(<4 x double> %va, <4 x float> %vb, <4 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v4f64:
 ; CHECK: # %bb.0:
 ; CHECK-NEXT: vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT: vfwnmsac.vv v8, v10, v11
@@ -1136,8 +1136,8 @@ define <4 x double>
   ret <4 x double> %vg
 }
 
-define <4 x double> @vfwnmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv4f64:
+define <4 x double> @vfwnmsac_vf_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
@@ -1151,8 +1151,8 @@ define <4 x double> @vfwnmsac_vf_nxv4f64(<4 x double> %va, <4 x float> %vb, floa
   ret <4 x double> %vg
 }
 
-define <4 x double> @vfwnmsac_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv4f64:
+define <4 x double> @vfwnmsac_fv_v4f64(<4 x double> %va, <4 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v4f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 4, e32, m1, ta, ma
 ; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v10
@@ -1168,8 +1168,8 @@ define <4 x double> @vfwnmsac_fv_nxv4f64(<4 x double> %va, <4 x float> %vb, floa
   ret <4 x double> %vh
 }
 
 declare <8 x double> @llvm.fma.v8f64(<8 x double>, <8 x double>, <8 x double>)
 
-define <8 x double> @vfwmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwmacc_vv_nxv8f64:
+define <8 x double> @vfwmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwmacc_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwmacc.vv v8, v12, v14
@@ -1180,8 +1180,8 @@ define <8 x double> @vfwmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x
   ret <8 x double> %vf
 }
 
-define <8 x double> @vfwmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmacc_vf_nxv8f64:
+define <8 x double> @vfwmacc_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmacc_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwmacc.vf v8, fa0, v12
@@ -1194,8 +1194,8 @@ define <8 x double> @vfwmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float
   ret <8 x double> %vf
 }
 
-define <8 x double> @vfwnmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwnmacc_vv_nxv8f64:
+define <8 x double> @vfwnmacc_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwnmacc_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmacc.vv v8, v12, v14
@@ -1208,8 +1208,8 @@ define <8 x double> @vfwnmacc_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x
   ret <8 x double> %vh
 }
 
-define <8 x double> @vfwnmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_vf_nxv8f64:
+define <8 x double> @vfwnmacc_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12
@@ -1224,8 +1224,8 @@ define <8 x double> @vfwnmacc_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, floa
   ret <8 x double> %vh
 }
 
-define <8 x double> @vfwnmacc_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmacc_fv_nxv8f64:
+define <8 x double> @vfwnmacc_fv_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmacc_fv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmacc.vf v8, fa0, v12
@@ -1240,8 +1240,8 @@ define <8 x double> @vfwnmacc_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, floa
   ret <8 x double> %vh
 }
 
-define <8 x double> @vfwmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwmsac_vv_nxv8f64:
+define <8 x double> @vfwmsac_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwmsac_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwmsac.vv v8, v12, v14
@@ -1253,8 +1253,8 @@ define <8 x double> @vfwmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x
   ret <8 x double> %vg
 }
 
-define <8 x double> @vfwmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwmsac_vf_nxv8f64:
+define <8 x double> @vfwmsac_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwmsac_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwmsac.vf v8, fa0, v12
@@ -1268,8 +1268,8 @@ define <8 x double> @vfwmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float
   ret <8 x double> %vg
 }
 
-define <8 x double> @vfwnmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
-; CHECK-LABEL: vfwnmsac_vv_nxv8f64:
+define <8 x double> @vfwnmsac_vv_v8f64(<8 x double> %va, <8 x float> %vb, <8 x float> %vc) {
+; CHECK-LABEL: vfwnmsac_vv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmsac.vv v8, v12, v14
@@ -1281,8 +1281,8 @@ define <8 x double> @vfwnmsac_vv_nxv8f64(<8 x double> %va, <8 x float> %vb, <8 x
   ret <8 x double> %vg
 }
 
-define <8 x double> @vfwnmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_vf_nxv8f64:
+define <8 x double> @vfwnmsac_vf_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_vf_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
@@ -1296,8 +1296,8 @@ define <8 x double> @vfwnmsac_vf_nxv8f64(<8 x double> %va, <8 x float> %vb, floa
   ret <8 x double> %vg
 }
 
-define <8 x double> @vfwnmsac_fv_nxv8f64(<8 x double> %va, <8 x float> %vb, float %c) {
-; CHECK-LABEL: vfwnmsac_fv_nxv8f64:
+define <8 x double> @vfwnmsac_fv_v8f64(<8 x double> %va, <8 x float> %vb, float %c) {
+; CHECK-LABEL: vfwnmsac_fv_v8f64:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    vsetivli zero, 8, e32, m2, ta, ma
 ; CHECK-NEXT:    vfwnmsac.vf v8, fa0, v12
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll
index 6de41fb..147558f 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp-mask.ll
@@ -2,7 +2,7 @@
 ; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 ; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s | FileCheck %s
 
-declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_nxv2i1_nxv2i16:
@@ -12,7 +12,7 @@ define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16(<vscale x 2 x i16> %a, <vscale x
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0, v0.t
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
+  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> %m, i32 %vl)
   ret <vscale x 2 x i1> %v
 }
@@ -23,7 +23,7 @@ define <vscale x 2 x i1> @vtrunc_nxv2i1_nxv2i16_unmasked(<vscale x 2 x i16> %a,
 ; CHECK-NEXT:    vand.vi v8, v8, 1
 ; CHECK-NEXT:    vmsne.vi v0, v8, 0
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i16.nxv2i1(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
+  %v = call <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i16(<vscale x 2 x i16> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x i1> %v
 }
diff --git a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
index a3fc9fd..a9c2a99 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vtrunc-vp.ll
@@ -187,7 +187,7 @@ define <vscale x 15 x i16> @vtrunc_nxv15i16_nxv15i64(<vscale x 15 x i64> %a, <v
   ret <vscale x 15 x i16> %v
 }
 
-declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
 
 define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_nxv2i32_nxv2i64:
@@ -196,7 +196,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64(<vscale x 2 x i64> %a, <vscale
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 0, v0.t
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
+  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> %m, i32 %vl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -207,7 +207,7 @@ define <vscale x 2 x i32> @vtrunc_nxv2i32_nxv2i64_unmasked(<vscale x 2 x i64> %a
 ; CHECK-NEXT:    vnsrl.wi v10, v8, 0
 ; CHECK-NEXT:    vmv.v.v v8, v10
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i64.nxv2i32(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
+  %v = call <vscale x 2 x i32> @llvm.vp.trunc.nxv2i32.nxv2i64(<vscale x 2 x i64> %a, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> undef, i1 true, i32 0), <vscale x 2 x i1> undef, <vscale x 2 x i32> zeroinitializer), i32 %vl)
   ret <vscale x 2 x i32> %v
 }
 
@@ -283,7 +283,7 @@ define <vscale x 32 x i8> @vtrunc_nxv32i8_nxv32i32(<vscale x 32 x i32> %a, <vsc
   ret <vscale x 32 x i8> %v
 }
 
-declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)
+declare <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64>, <vscale x 32 x i1>, i32)
 
 define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 zeroext %vl) {
 ; CHECK-LABEL: vtrunc_nxv32i64_nxv32i32:
@@ -378,6 +378,6 @@ define <vscale x 32 x i32> @vtrunc_nxv32i64_nxv32i32(<vscale x 32 x i64> %a, <v
 ; CHECK-NEXT:    ret
-  %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i64.nxv32i32(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
+  %v = call <vscale x 32 x i32> @llvm.vp.trunc.nxv32i32.nxv32i64(<vscale x 32 x i64> %a, <vscale x 32 x i1> %m, i32 %vl)
   ret <vscale x 32 x i32> %v
 }
-- 
2.7.4
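
Reviewer note (illustration only, not part of the diff): the renames above follow
LLVM's overloaded-intrinsic mangling, in which @llvm.vp.trunc encodes the result
type before the source type, with fixed vectors mangled as vNiM and scalable
vectors as nxvNiM. A minimal sketch of the two correctly mangled forms, using the
<2 x i32> -> <2 x i1> truncation as an assumed example:

; Fixed-length: truncate <2 x i32> to <2 x i1> under a mask and an EVL count.
declare <2 x i1> @llvm.vp.trunc.v2i1.v2i32(<2 x i32>, <2 x i1>, i32)

; Scalable: the same operation on <vscale x 2 x ...> types.
declare <vscale x 2 x i1> @llvm.vp.trunc.nxv2i1.nxv2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)

The mask and EVL operands are not overloaded, so only the two vector types appear
in the name suffix; a suffix that lists the source type first, or uses vNiM names
for scalable operands, is exactly the mismatch this patch cleans up.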